entry_point (string, 1-65) | original_triton_code (string, 4.5k-619k) | python_code (string, 208-60.9k) | triton_code (string, 1.15k-275k) | repo_name (string, 7-115) | module_name (string, 1-65) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (sequence, 1-6) | stars (int64, 0-19.8k) | sha (string, 40) | repo_link (string, 72-180) | pytorch_code (string, 200-4.05k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
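A minimal sketch of reading rows with this schema via the Hugging Face `datasets` library; the dataset repository id below is a placeholder, not the actual path.

```python
# Minimal sketch: iterating over rows with the schema above using the
# Hugging Face `datasets` library. The dataset id is hypothetical.
from datasets import load_dataset

ds = load_dataset("user/inductor-triton-pairs", split="train")  # placeholder id

row = ds[0]
# Each row pairs a PyTorch nn.Module with its Inductor-generated Triton code.
print(row["entry_point"], row["repo_name"], row["stars"], row["licenses"])
print(row["pytorch_code"][:200])  # original PyTorch module source
print(row["triton_code"][:200])   # simplified Triton kernel plus call() wrapper
```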
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qx/cqxxiluz2h2xs6bgpkfoeymswpyft6ftign7rfvumay4iu2ezaqa.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_3, add, mul_1, A_sum, mul_2, B_sum, add_1, add_2, truediv, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# A_sum => sum_2
# B_sum => sum_3
# add => add
# add_1 => add_1
# add_2 => add_2
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# sub => sub
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = 1.0
tmp17 = tmp15 + tmp16
tmp18 = tmp9 + tmp13
tmp19 = tmp18 + tmp16
tmp20 = tmp17 / tmp19
tmp21 = tmp16 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_3, add, mul_1, A_sum, mul_2, B_sum, add_1, add_2, truediv, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
class DiceLoss(torch.nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, pred, target):
smooth = 1.0
iflat = pred.contiguous().view(-1)
tflat = target.contiguous().view(-1)
intersection = (iflat * tflat).sum()
A_sum = torch.sum(iflat * iflat)
B_sum = torch.sum(tflat * tflat)
return 1 - (2.0 * intersection + smooth) / (A_sum + B_sum + smooth)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tmp0 * tmp0
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp1 * tmp1
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 2.0
tmp15 = tmp5 * tmp14
tmp16 = 1.0
tmp17 = tmp15 + tmp16
tmp18 = tmp9 + tmp13
tmp19 = tmp18 + tmp16
tmp20 = tmp17 / tmp19
tmp21 = tmp16 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(torch.nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| csharpshooter/DeepLearning | DiceLoss | false | 1,750 | [
"MIT"
] | 0 | c1d20660c32076468970f7376931e1fcd0d2644e | https://github.com/csharpshooter/DeepLearning/tree/c1d20660c32076468970f7376931e1fcd0d2644e | import torch
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, target):
smooth = 1.0
iflat = pred.contiguous().view(-1)
tflat = target.contiguous().view(-1)
intersection = (iflat * tflat).sum()
A_sum = torch.sum(iflat * iflat)
B_sum = torch.sum(tflat * tflat)
return 1 - (2.0 * intersection + smooth) / (A_sum + B_sum + smooth)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
WeightedFeatureFusion | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/fs/cfs4xkoaiu25wxx5ko6j355loos24sadbddfu2644hsgmchy36go.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# x_1 => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %select), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %select_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp1
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((5, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
class WeightedFeatureFusion(nn.Module):
def __init__(self, layers, weight=False):
super(WeightedFeatureFusion, self).__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, x, outputs):
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n)
x = x * w[0]
nx = x.shape[1]
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[
self.layers[i]]
na = a.shape[1]
if nx == na:
x = x + a
elif nx > na:
x[:, :na] = x[:, :na] + a
else:
x = x + a[:, :nx]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return [[], {'layers': [4, 4]}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + (256 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 + tmp1
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (5, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class WeightedFeatureFusionNew(nn.Module):
def __init__(self, layers, weight=False):
super(WeightedFeatureFusionNew, self).__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| csharpshooter/DeepLearning | WeightedFeatureFusion | false | 1,751 | [
"MIT"
] | 0 | c1d20660c32076468970f7376931e1fcd0d2644e | https://github.com/csharpshooter/DeepLearning/tree/c1d20660c32076468970f7376931e1fcd0d2644e | import torch
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
class Model(nn.Module):
def __init__(self, layers, weight=False):
super().__init__()
self.layers = layers
self.weight = weight
self.n = len(layers) + 1
if weight:
self.w = nn.Parameter(torch.zeros(self.n), requires_grad=True)
def forward(self, x, outputs):
if self.weight:
w = torch.sigmoid(self.w) * (2 / self.n)
x = x * w[0]
nx = x.shape[1]
for i in range(self.n - 1):
a = outputs[self.layers[i]] * w[i + 1] if self.weight else outputs[
self.layers[i]]
na = a.shape[1]
if nx == na:
x = x + a
elif nx > na:
x[:, :na] = x[:, :na] + a
else:
x = x + a[:, :nx]
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([5, 4, 4, 4])]
def get_init_inputs():
return []
|
CondInjection | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xw/cxwg3k3qmxnavr6z7eiak7zhuseu6vhubtxueqvow6moxv7a3sqa.py
# Topologically Sorted Source Nodes: [mul, add_1], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %normal_functional), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + (x3), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
buf0 = empty_strided_cuda((256, 1, 4, 4), (16, 16, 4, 1), torch.float32)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional]
buf1 = torch.ops.aten.normal_functional.default(buf0)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add_1], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_1, primals_3, buf2, buf3, 16384, grid=grid(16384), stream=stream0)
del primals_1
del primals_3
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.cpp_extension
class CondInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, labels, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
labels = labels.view(-1, 1, 1, 1)
batch, _, height, width = image.shape
image.new_ones(batch, 1, height, width) / (labels + 1)
return image + self.weight * noise
def get_inputs():
return [torch.rand([256, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x3, tmp5, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
buf0 = empty_strided_cuda((256, 1, 4, 4), (16, 16, 4, 1), torch.float32)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = torch.ops.aten.normal_functional.default(buf0)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((256, 4, 4, 4), (64, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(16384)](primals_1, primals_3, buf2,
buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf3, buf2
class CondInjectionNew(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| crobbins327/semanticGAN_WSI | CondInjection | false | 1,752 | [
"BSD-2-Clause",
"MIT"
] | 0 | 4046ddc822f463e03952402247f79d540bf7be95 | https://github.com/crobbins327/semanticGAN_WSI/tree/4046ddc822f463e03952402247f79d540bf7be95 | import torch
import torch.nn as nn
import torch.utils.cpp_extension
class Model(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, labels, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
labels = labels.view(-1, 1, 1, 1)
batch, _, height, width = image.shape
image.new_ones(batch, 1, height, width) / (labels + 1)
return image + self.weight * noise
def get_inputs():
return [torch.rand([256, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xl/cxlpplg3hmt6k4x6alhg4yn6eq5jppxhzrzdcvvcpbupy7pjgudn.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
# Source node to ATen node mapping:
# max_1 => max_1
# out => exp
# score_1 => div
# src_max => clamp_min
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused_clamp_div_exp_max_sub_0 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/fi/cfijnjpiz4ruggqhl6zhj4ujuexfsuzxvpo26muzj4bggik4i5hl.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# max_1 => max_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/h7/ch7xj7aj6agx6frik7qd7tffe2pmrsjjensiwx2hy2md4kde7aj5.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add
# exp_1 => exp_1
# max_1 => max_1
# out_1 => div_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add), kwargs = {})
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1.run(buf1, buf0, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_1], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2.run(buf3, buf2, 256, grid=grid(256), stream=stream0)
del buf2
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.utils.data
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg1_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_exp_max_sub_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_1[grid(64)](buf1,
buf0, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_2[grid(256)](buf3,
buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
buf4 = buf0
del buf0
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class AttentionNew(torch.nn.Module):
def __init__(self, dropout=0):
super(AttentionNew, self).__init__()
self.dropout = dropout
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| cshjin/pytorch_geometric | Attention | false | 1,753 | [
"MIT"
] | 0 | 8dd0e76beb72135949a275edd851f80f7b97648f | https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f | import math
import torch
import torch.nn.functional as F
import torch.utils.data
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Model(torch.nn.Module):
def __init__(self, dropout=0):
super().__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
ActFirstResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/at/catox4w7srbmijm77a77jveyorvvfplzl3dow4ou42jivdjdzcod.py
# Topologically Sorted Source Nodes: [x, pad], Original ATen: [aten.leaky_relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %mul), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_leaky_relu_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_leaky_relu_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ik/cik5qbhwt3mkw2phkuck42e54kmmja5u7bwdegg6xzogiza2muuy.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => gt_1
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/bf/cbfypdkk75faja7xlmj7sivuos5gfo5fdejllruznpnd72ae4gt3.py
# Topologically Sorted Source Nodes: [x_1, x_2, pad_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad_1 => _unsafe_index_2, _unsafe_index_3
# x_1 => convolution
# x_2 => mul_1, where_1
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution, %mul_1), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x4 = (xindex // 36)
x2 = (xindex // 36) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x4)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.2
tmp5 = tmp3 * tmp4
tmp6 = tl.where(tmp0, tmp3, tmp5)
tl.store(out_ptr0 + (x5), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4g/c4ggrwz5dwisbmsoqxjaytjtns6mjlaxldj2bvi5bb2qmv2mmmle.py
# Topologically Sorted Source Nodes: [x_3, out], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => add
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_3 = async_compile.triton('triton_poi_fused_add_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, pad], Original ATen: [aten.leaky_relu, aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2, pad_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.reflection_pad2d]
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2.run(buf2, buf1, primals_3, buf3, 576, grid=grid(576), stream=stream0)
del buf1
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [x_3, out], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_3.run(buf5, primals_1, primals_5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf5, primals_2, primals_4, buf0, buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
out = F.batch_norm(x_reshaped, running_mean, running_var, self.
weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Conv2dBlock(nn.Module):
def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none',
activation='relu', pad_type='zero', use_bias=True, activation_first
=False, dropout='none'):
super(Conv2dBlock, self).__init__()
self.use_bias = use_bias
self.activation_first = activation_first
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if dropout == 'none':
self.dropout = None
else:
self.dropout = nn.Dropout(dropout)
self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
def forward(self, x):
if self.activation_first:
if self.activation:
x = self.activation(x)
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
else:
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
if self.dropout:
x = self.dropout(x)
return x
class ActFirstResBlock(nn.Module):
def __init__(self, fin, fout, fhid=None, activation='lrelu', norm='none'):
super().__init__()
self.learned_shortcut = fin != fout
self.fin = fin
self.fout = fout
self.fhid = min(fin, fout) if fhid is None else fhid
self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1, padding=1,
pad_type='reflect', norm=norm, activation=activation,
activation_first=True)
self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1, padding=1,
pad_type='reflect', norm=norm, activation=activation,
activation_first=True)
if self.learned_shortcut:
self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1, activation
='none', use_bias=False)
def forward(self, x):
x_s = self.conv_s(x) if self.learned_shortcut else x
dx = self.conv_0(x)
dx = self.conv_1(dx)
out = x_s + dx
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'fin': 4, 'fout': 4}]
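# Minimal, hedged usage sketch (not part of the original repository; helper names are
# illustrative): it instantiates the block with the arguments from get_init_inputs() and
# checks that the spatial size is preserved, since each Conv2dBlock pairs reflection
# padding of 1 with a 3x3, stride-1 convolution.
if __name__ == '__main__':
    _block = ActFirstResBlock(fin=4, fout=4)
    _x, = get_inputs()
    _out = _block(_x)
    assert _out.shape == _x.shape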
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_reflection_pad2d_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x4 = xindex // 36
x2 = xindex // 36 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x4),
xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 0.2
tmp5 = tmp3 * tmp4
tmp6 = tl.where(tmp0, tmp3, tmp5)
tl.store(out_ptr0 + x5, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_reflection_pad2d_0[grid(576)](primals_1,
buf0, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_1[grid(256)](buf1,
primals_3, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_reflection_pad2d_2[grid(576)](
buf2, buf1, primals_3, buf3, 576, XBLOCK=256, num_warps=4,
num_stages=1)
del buf1
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_add_convolution_3[grid(256)](buf5, primals_1,
primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf5, primals_2, primals_4, buf0, buf2, buf3
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
out = F.batch_norm(x_reshaped, running_mean, running_var, self.
weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Conv2dBlock(nn.Module):
def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none',
activation='relu', pad_type='zero', use_bias=True, activation_first
=False, dropout='none'):
super(Conv2dBlock, self).__init__()
self.use_bias = use_bias
self.activation_first = activation_first
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if dropout == 'none':
self.dropout = None
else:
self.dropout = nn.Dropout(dropout)
self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
def forward(self, x):
if self.activation_first:
if self.activation:
x = self.activation(x)
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
else:
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
if self.dropout:
x = self.dropout(x)
return x
class ActFirstResBlockNew(nn.Module):
def __init__(self, fin, fout, fhid=None, activation='lrelu', norm='none'):
super().__init__()
self.learned_shortcut = fin != fout
self.fin = fin
self.fout = fout
self.fhid = min(fin, fout) if fhid is None else fhid
self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1, padding=1,
pad_type='reflect', norm=norm, activation=activation,
activation_first=True)
self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1, padding=1,
pad_type='reflect', norm=norm, activation=activation,
activation_first=True)
if self.learned_shortcut:
self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1, activation
='none', use_bias=False)
def forward(self, input_0):
primals_2 = self.conv_0.conv.weight
primals_3 = self.conv_0.conv.bias
primals_4 = self.conv_1.conv.weight
primals_5 = self.conv_1.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
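# Hedged sanity-check sketch (assumes a CUDA device with Triton available; helper names
# are illustrative, not from the original repo): with fin == fout there is no learned
# shortcut, so the compiled block should match the eager residual
# x + conv_1(pad(lrelu(conv_0(pad(lrelu(x)))))).
if __name__ == '__main__':
    if torch.cuda.is_available():
        _blk = ActFirstResBlockNew(fin=4, fout=4).cuda()
        _x = torch.rand(4, 4, 4, 4, device='cuda')
        _lrelu, _pad = nn.LeakyReLU(0.2), nn.ReflectionPad2d(1)
        _dx = _blk.conv_0.conv(_pad(_lrelu(_x)))
        _dx = _blk.conv_1.conv(_pad(_lrelu(_dx)))
        assert torch.allclose(_blk(_x), _x + _dx, atol=1e-5)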
| cplusx/SIGN | ActFirstResBlock | false | 1754 | [
"Apache-2.0"
] | 0 | 9777fc3ddd4c6f799caeefe1e72482d5b1ecd8ae | https://github.com/cplusx/SIGN/tree/9777fc3ddd4c6f799caeefe1e72482d5b1ecd8ae | import torch
import torch.nn.functional as F
from torch import nn
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
out = F.batch_norm(x_reshaped, running_mean, running_var, self.
weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Conv2dBlock(nn.Module):
def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none',
activation='relu', pad_type='zero', use_bias=True, activation_first
=False, dropout='none'):
super().__init__()
self.use_bias = use_bias
self.activation_first = activation_first
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
if dropout == 'none':
self.dropout = None
else:
self.dropout = nn.Dropout(dropout)
self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
def forward(self, x):
if self.activation_first:
if self.activation:
x = self.activation(x)
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
else:
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
if self.dropout:
x = self.dropout(x)
return x
class Model(nn.Module):
def __init__(self, fin, fout, fhid=None, activation='lrelu', norm='none'):
super().__init__()
self.learned_shortcut = fin != fout
self.fin = fin
self.fout = fout
self.fhid = min(fin, fout) if fhid is None else fhid
self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1, padding=1,
pad_type='reflect', norm=norm, activation=activation,
activation_first=True)
self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1, padding=1,
pad_type='reflect', norm=norm, activation=activation,
activation_first=True)
if self.learned_shortcut:
self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1, activation
='n
# ... truncated (>4000 chars) for memory efficiency |
SoftmaxLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jh/cjhafiazvhnqzahvgpeyzzxgeb5atp7ebiv4plitnblial63qxb6.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 1.0), kwargs = {})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/t2/ct2dbabladhyyceg2gmfqrslgo4edv7x6gs7iscumud7suileuje.py
# Topologically Sorted Source Nodes: [l], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# l => div_1, exp, log, mul, neg, sub_1, sum_1, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [l], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.cpp_extension
class SoftmaxLoss(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, pred, true):
logits = pred / self.tau
l = self.ce_loss(logits, true)
return l
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
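# Hedged reference sketch (not from the original repo; assumes a PyTorch version,
# 1.10 or newer, whose CrossEntropyLoss accepts probability-style targets): for float
# targets shaped like the logits, the loss is -sum(log_softmax(pred / tau, dim=1) * true)
# averaged over the N*H*W = 64 positions, which is the quantity the fused kernel above
# computes (its 0.015625 constant is 1/64).
if __name__ == '__main__':
    _pred, _true = get_inputs()
    _loss = SoftmaxLoss(tau=1.0)(_pred, _true)
    _manual = -(torch.log_softmax(_pred, dim=1) * _true).sum() / 64
    assert torch.allclose(_loss, _manual, atol=1e-6)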
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp1
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf2, buf0,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class SoftmaxLossNew(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
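# Hedged sanity-check sketch (assumes a CUDA device with Triton available and
# PyTorch >= 1.10 for probability-style targets): with tau == 1.0 the compiled path
# should agree with the eager CrossEntropyLoss computation it was traced from.
if __name__ == '__main__':
    if torch.cuda.is_available():
        _pred = torch.rand(4, 4, 4, 4, device='cuda')
        _true = torch.rand(4, 4, 4, 4, device='cuda')
        _ref = torch.nn.CrossEntropyLoss()(_pred, _true)
        assert torch.allclose(SoftmaxLossNew(tau=1.0)(_pred, _true), _ref, atol=1e-5)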
| crobbins327/semanticGAN_WSI | SoftmaxLoss | false | 1755 | [
"BSD-2-Clause",
"MIT"
] | 0 | 4046ddc822f463e03952402247f79d540bf7be95 | https://github.com/crobbins327/semanticGAN_WSI/tree/4046ddc822f463e03952402247f79d540bf7be95 | import torch
import torch.utils.cpp_extension
class Model(torch.nn.Module):
def __init__(self, tau=1.0):
super().__init__()
self.tau = tau
self.ce_loss = torch.nn.CrossEntropyLoss()
def forward(self, pred, true):
logits = pred / self.tau
l = self.ce_loss(logits, true)
return l
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Gate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6v/c6v5wbm5lsunafktut4znlxsnjnnqyz6khmykdxvnjhemkarthul.py
# Topologically Sorted Source Nodes: [gate, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# gate => sigmoid
# mul => mul
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_proj], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [gate, mul], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
return (buf1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Gate(nn.Module):
"""Gate Unit
    x_proj = Wx
    g = sigmoid(x)
    out = x_proj * g  (note: the gate is computed from the raw input x, not from Wx)
"""
def __init__(self, input_size):
super(Gate, self).__init__()
self.linear = nn.Linear(input_size, input_size, bias=False)
def forward(self, x):
"""
Args:
x: batch * len * dim
x_mask: batch * len (1 for padding, 0 for true)
Output:
res: batch * len * dim
"""
x_proj = self.linear(x)
gate = F.sigmoid(x)
return x_proj * gate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
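# Hedged usage sketch (not from the original repo; helper names are illustrative): as
# implemented, the gate is sigmoid(x) over the raw input and it scales the projection
# Wx; the compiled kernel further down reproduces exactly this behaviour.
if __name__ == '__main__':
    _gate = Gate(input_size=4)
    _x, = get_inputs()
    assert torch.allclose(_gate(_x), _gate.linear(_x) * torch.sigmoid(_x))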
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf1, primals_2
class GateNew(nn.Module):
"""Gate Unit
    x_proj = Wx
    g = sigmoid(x)
    out = x_proj * g  (note: the gate is computed from the raw input x, not from Wx)
"""
def __init__(self, input_size):
super(GateNew, self).__init__()
self.linear = nn.Linear(input_size, input_size, bias=False)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
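# Hedged sanity-check sketch (assumes a CUDA device with Triton available; helper names
# are illustrative): the fused sigmoid-multiply path should match the eager computation
# linear(x) * sigmoid(x).
if __name__ == '__main__':
    if torch.cuda.is_available():
        _m = GateNew(input_size=4).cuda()
        _x = torch.rand(4, 4, 4, 4, device='cuda')
        _ref = _m.linear(_x) * torch.sigmoid(_x)
        assert torch.allclose(_m(_x), _ref, atol=1e-6)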
| csarron/QAModels | Gate | false | 1756 | [
"BSD-3-Clause"
] | 0 | 2db2d7b0f546b88211e111b42744408bbf9b6f35 | https://github.com/csarron/QAModels/tree/2db2d7b0f546b88211e111b42744408bbf9b6f35 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Gate Unit
    x_proj = Wx
    g = sigmoid(x)
    out = x_proj * g  (note: the gate is computed from the raw input x, not from Wx)
"""
def __init__(self, input_size):
super().__init__()
self.linear = nn.Linear(input_size, input_size, bias=False)
def forward(self, x):
"""
Args:
x: batch * len * dim
x_mask: batch * len (1 for padding, 0 for true)
Output:
res: batch * len * dim
"""
x_proj = self.linear(x)
gate = F.sigmoid(x)
return x_proj * gate
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Linear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/bh/cbhpcgxjg3mwo4dulstw5ie26none2yzi5sysdzl34cu6pyah4fg.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.view]
# Source node to ATen node mapping:
# out_1 => add, view_3
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {})
triton_poi_fused_add_view_0 = async_compile.triton('triton_poi_fused_add_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.view]
stream0 = get_raw_stream(0)
triton_poi_fused_add_view_0.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
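# Hedged usage sketch (not from the original repo; helper names are illustrative): with
# groups > 1 the weight acts as a block-diagonal matrix, so each slice of
# in_channels // groups input features is projected independently; group 0 of the output
# depends only on group 0 of the input.
if __name__ == '__main__':
    _lin = Linear(in_channels=4, out_channels=4, groups=2)
    _x = torch.rand(4, 4, 4, 4)
    _out = _lin(_x)
    _ref0 = _x[..., :2] @ _lin.weight[0] + _lin.bias[:2]
    assert torch.allclose(_out[..., :2], _ref0, atol=1e-6)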
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import Tensor
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (4, 1), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_view_0[grid(256)](buf2, primals_3, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
class LinearNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(LinearNew, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| cshjin/pytorch_geometric | Linear | false | 1,757 | [
"MIT"
] | 0 | 8dd0e76beb72135949a275edd851f80f7b97648f | https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f | import math
import torch
from torch import Tensor
from torch.nn import Linear
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
class Model(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super().__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SFU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3l/c3lo77c7wjxasxrhtr6wesb72ods2d2rxnxhbfieun7j2wukm3wn.py
# Topologically Sorted Source Nodes: [r_f], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# r_f => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4h/c4hvd3vhapir7uardh64cxnc57lcta2i2xqwszrnyxam4nh7uanb.py
# Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# g => sigmoid
# mul => mul
# mul_1 => mul_1
# o => add
# r => tanh
# sub => sub
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [r_f], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 128, grid=grid(128), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r, g, mul, sub, mul_1, o], Original ATen: [aten.tanh, aten.sigmoid, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf2, buf1, primals_1, buf3, 64, grid=grid(64), stream=stream0)
return (buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SFU(nn.Module):
"""Semantic Fusion Unit
The output vector is expected to not only retrieve correlative information from the fusion vectors,
but also remain partly unchanged, like the input vector
"""
def __init__(self, input_size, fusion_size):
super(SFU, self).__init__()
self.linear_r = nn.Linear(input_size + fusion_size, input_size)
self.linear_g = nn.Linear(input_size + fusion_size, input_size)
def forward(self, x, fusions):
r_f = torch.cat([x, fusions], 2)
r = F.tanh(self.linear_r(r_f))
g = F.sigmoid(self.linear_g(r_f))
o = g * r + (1 - g) * x
return o
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'fusion_size': 4}]
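# A minimal usage sketch (illustrative only; the names below are not from the original
# source) for the SFU module above. It exercises the gating identity the module
# computes, o = g * r + (1 - g) * x: a gate near 0 keeps the raw input x, while a
# gate near 1 keeps the fused representation r.
def _example_sfu():
    sfu = SFU(input_size=4, fusion_size=4)
    x, fusions = get_inputs()   # two (4, 4, 4) tensors, as defined above
    o = sfu(x, fusions)
    assert o.shape == x.shape   # the fusion preserves the input shape
    return o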
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = 1.0
tmp6 = tmp5 - tmp1
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 8), (8, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, primals_2, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(buf0, (16, 8), (
8, 1), 0), reinterpret_tensor(primals_5, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_5
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(64)](buf2, buf1,
primals_1, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf3, primals_1, reinterpret_tensor(buf0, (16, 8), (8, 1), 0
), buf1, buf2
class SFUNew(nn.Module):
"""Semantic Fusion Unit
The output vector is expected to not only retrieve correlative information from the fusion vectors,
but also remain partly unchanged, like the input vector
"""
def __init__(self, input_size, fusion_size):
super(SFUNew, self).__init__()
self.linear_r = nn.Linear(input_size + fusion_size, input_size)
self.linear_g = nn.Linear(input_size + fusion_size, input_size)
def forward(self, input_0, input_1):
primals_3 = self.linear_r.weight
primals_4 = self.linear_r.bias
primals_5 = self.linear_g.weight
primals_6 = self.linear_g.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| csarron/QAModels | SFU | false | 1,758 | [
"BSD-3-Clause"
] | 0 | 2db2d7b0f546b88211e111b42744408bbf9b6f35 | https://github.com/csarron/QAModels/tree/2db2d7b0f546b88211e111b42744408bbf9b6f35 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Semantic Fusion Unit
The output vector is expected to not only retrieve correlative information from the fusion vectors,
but also remain partly unchanged, like the input vector
"""
def __init__(self, input_size, fusion_size):
super().__init__()
self.linear_r = nn.Linear(input_size + fusion_size, input_size)
self.linear_g = nn.Linear(input_size + fusion_size, input_size)
def forward(self, x, fusions):
r_f = torch.cat([x, fusions], 2)
r = F.tanh(self.linear_r(r_f))
g = F.sigmoid(self.linear_g(r_f))
o = g * r + (1 - g) * x
return o
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xx/cxxsy7adcakcv5c6imnaqsvf3ebhgbph77vaoembzakbqdrjycbc.py
# Topologically Sorted Source Nodes: [mean, sub, mul, std, add, truediv, norm], Original ATen: [aten.mean, aten.sub, aten.mul, aten.std, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# norm => add_1
# std => sqrt, var
# sub => sub
# truediv => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sub), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_2, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
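# Hedged reference, not part of the generated kernel: an eager-mode sketch of the
# normalization described by the graph fragment above, i.e.
#     norm = alpha * (x - mean(x, -1)) / (std(x, -1) + 1e-06) + bias
# with an unbiased std (correction=1.0), matching aten.var above. The function name
# and argument names are illustrative only.
def _reference_layer_norm(x, alpha, bias, eps=1e-06):
    mean = x.mean(dim=-1, keepdim=True)
    std = x.std(dim=-1, keepdim=True, unbiased=True)
    return alpha * (x - mean) / (std + eps) + bias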
# kernel path: runs/run_shard_6/inductor_cache/mh/cmhet4vfl4jlxtge4zzaaa2nugvxpr5f4ge7rs72qwe2aow7vxwy.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pv/cpvxifzinnau3xu6vyqheclzr6mcqs5g6lyequacfqmznapjidec.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_2 = async_compile.triton('triton_poi_fused_eq_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2i/c2i57js6jak6bynwgyueladupcbb2qpciiu6btv5dj64tjdpelzi.py
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# scores => div_1
# scores_1 => full_default, where
# scores_2 => amax, exp, sub_1, sum_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x3), tmp20, xmask)
tl.store(out_ptr1 + (x3), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/rw/crwno6mlbzllxkax3dzrhb3xva3g3yd52nkztijmiyd6omrsilpe.py
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# scores => div_1
# scores_1 => full_default, where
# scores_2 => amax, div_2, exp, sub_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x6 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x5), xmask)
tmp6 = tl.load(in_ptr1 + (x6), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
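# Hedged reference, not emitted by the compiler: the two fused kernels above together
# compute a masked, scaled softmax over the last dimension, split into a max/denominator
# pass and a normalization pass. A plain-PyTorch sketch of the same computation
# (function and argument names are illustrative only):
def _reference_masked_softmax(scores, mask, scale=1.0):
    scores = scores / scale
    scores = scores.masked_fill(mask == 0, -1000000000.0)
    return torch.softmax(scores, dim=-1)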
# kernel path: runs/run_shard_6/inductor_cache/we/cwe54p4p4jvwbdktkpj3wy2coheu6f3r3dgvi7ozm7xjfk4mgbwx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ez/cezumc4bfxcfwb2v57uxko3oual5p4k5atbughboziniozdii3kw.py
# Topologically Sorted Source Nodes: [x, mean_1, std_1], Original ATen: [aten.add, aten.mean, aten.std]
# Source node to ATen node mapping:
# mean_1 => mean_1
# std_1 => var_1
# x => add_2
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_17), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_2, [-1]), kwargs = {correction: 1.0, keepdim: True})
triton_poi_fused_add_mean_std_6 = async_compile.triton('triton_poi_fused_add_mean_std_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_std_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/44/c44nsundpysjinc27mkdhx3tcd7nqj5ir3mp6sd7nqx3ryqium46.py
# Topologically Sorted Source Nodes: [x, mean_1, sub_1, mul_1, std_1, add_3, truediv_2, norm_1], Original ATen: [aten.add, aten.mean, aten.sub, aten.mul, aten.std, aten.div]
# Source node to ATen node mapping:
# add_3 => add_3
# mean_1 => mean_1
# mul_1 => mul_1
# norm_1 => add_4
# std_1 => sqrt_1
# sub_1 => sub_2
# truediv_2 => div_3
# x => add_2
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_17), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_13, %sub_2), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_3, %primals_14), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_7 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7x/c7xmgbb6q37ywt2xwwuyvmm2fa6ua7pjotmzueh3bzjhceppzwqs.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add_2
# x_1 => add_5
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_17), kwargs = {})
# %add_5 : [num_users=5] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_35), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ij/cij76hcpn7nv423txzk62pwmo265onn7xwme2hfe53h4yzdsnyxd.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_37,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_9 = async_compile.triton('triton_poi_fused_relu_threshold_backward_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
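# Hedged reference, illustrative only: the bias-add + ReLU kernel above is the
# activation inside what appears to be the decoder's position-wise feed-forward
# block (hidden width 2048 here), roughly relu(x @ W1.T + b1) @ W2.T + b2 together
# with the surrounding matmul calls in call(). A plain-PyTorch sketch:
def _reference_feed_forward(x, w1, b1, w2, b2):
    hidden = torch.relu(torch.nn.functional.linear(x, w1, b1))  # (..., 2048)
    return torch.nn.functional.linear(hidden, w2, b2)           # (..., d_model)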
# kernel path: runs/run_shard_6/inductor_cache/cn/ccn3ztml5qupb2u6trhxfni5siq2chr5f7il63gpx775u3hi6iz5.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_4 => add_8
# Graph fragment:
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %view_39), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4, ), (1, ))
assert_size_stride(primals_22, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_23, (4, 4), (4, 1))
assert_size_stride(primals_24, (4, ), (1, ))
assert_size_stride(primals_25, (4, ), (1, ))
assert_size_stride(primals_26, (4, ), (1, ))
assert_size_stride(primals_27, (2048, 4), (4, 1))
assert_size_stride(primals_28, (2048, ), (1, ))
assert_size_stride(primals_29, (4, 2048), (2048, 1))
assert_size_stride(primals_30, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, mul, std, add, truediv, norm], Original ATen: [aten.mean, aten.sub, aten.mul, aten.std, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_1, primals_2, primals_3, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf5 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_2.run(primals_10, buf7, 64, grid=grid(64), stream=stream0)
del primals_10
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_3.run(buf7, buf6, buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_4.run(buf10, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf3, primals_9, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_9
buf12 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf12, buf13, 16, 4, grid=grid(16, 4), stream=stream0)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14)
del primals_12
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x, mean_1, std_1], Original ATen: [aten.add, aten.mean, aten.std]
triton_poi_fused_add_mean_std_6.run(buf17, primals_2, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf18)
del primals_15
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mean_1, sub_1, mul_1, std_1, add_3, truediv_2, norm_1], Original ATen: [aten.add, aten.mean, aten.sub, aten.mul, aten.std, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_7.run(primals_13, primals_2, buf14, buf15, buf17, primals_14, buf19, 64, grid=grid(64), stream=stream0)
del buf15
del buf17
del primals_14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0), reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf21)
del primals_20
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf20, primals_19, buf22, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_19
buf23 = reinterpret_tensor(buf20, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf18, primals_16, buf23, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_16
buf24 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf23, (16, 1, 4), (4, 0, 1), 0), out=buf24)
buf25 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq_1], Original ATen: [aten.eq]
triton_poi_fused_eq_2.run(primals_22, buf25, 64, grid=grid(64), stream=stream0)
del primals_22
buf26 = reinterpret_tensor(buf18, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf18 # reuse
buf27 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [scores_1, scores_4, scores_5, scores_6], Original ATen: [aten.masked_fill, aten.div, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_3.run(buf25, buf24, buf26, buf27, 64, grid=grid(64), stream=stream0)
buf28 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [scores_1, scores_4, scores_5, scores_6], Original ATen: [aten.masked_fill, aten.div, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_4.run(buf28, buf25, buf26, buf27, 256, grid=grid(256), stream=stream0)
buf29 = reinterpret_tensor(buf27, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf21, primals_21, buf29, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_21
buf30 = reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf28, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf29, (16, 4, 1), (4, 1, 0), 0), out=buf30)
buf31 = reinterpret_tensor(buf26, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [contiguous_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf30, buf31, 16, 4, grid=grid(16, 4), stream=stream0)
buf32 = reinterpret_tensor(buf30, (16, 4), (4, 1), 0); del buf30 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf31, (16, 4), (4, 1), 0), reinterpret_tensor(primals_23, (4, 4), (1, 4), 0), out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4), (16, 4, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf33, primals_2, buf14, primals_24, 64, grid=grid(64), stream=stream0)
del primals_24
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, sub_2, mul_2, std_2, add_6, truediv_4, norm_2], Original ATen: [aten.mean, aten.sub, aten.mul, aten.std, aten.add, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_25, buf33, primals_26, buf34, 64, grid=grid(64), stream=stream0)
del primals_26
buf35 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(primals_27, (4, 2048), (1, 4), 0), out=buf35)
buf36 = reinterpret_tensor(buf35, (4, 4, 2048), (8192, 2048, 1), 0); del buf35 # reuse
buf39 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_9.run(buf36, primals_28, buf39, 32768, grid=grid(32768), stream=stream0)
del primals_28
buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf36, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_29, (2048, 4), (1, 2048), 0), out=buf37)
buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0); del buf37 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf38, buf33, primals_30, 64, grid=grid(64), stream=stream0)
del primals_30
return (buf38, primals_2, primals_13, primals_25, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf7, buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), buf14, reinterpret_tensor(primals_17, (16, 4), (4, 1), 0), reinterpret_tensor(buf19, (16, 4), (4, 1), 0), buf25, buf28, reinterpret_tensor(buf31, (16, 4), (4, 1), 0), buf33, reinterpret_tensor(buf34, (16, 4), (4, 1), 0), reinterpret_tensor(buf36, (16, 2048), (2048, 1), 0), primals_29, buf39, primals_27, primals_23, reinterpret_tensor(buf29, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 4), 0), primals_18, primals_11, reinterpret_tensor(buf11, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
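def _example_attention_usage():
    # Hedged, illustrative sketch only (not part of the original repo): shows the
    # tensor shapes `attention` expects after MultiHeadAttention has reshaped its
    # inputs. Batch size 2, 4 heads, sequence length 5 and d_k = 8 are assumptions
    # chosen purely for demonstration.
    q = torch.rand(2, 4, 5, 8)
    k = torch.rand(2, 4, 5, 8)
    v = torch.rand(2, 4, 5, 8)
    mask = torch.ones(2, 5, 5)  # unsqueezed to (2, 1, 5, 5) inside attention()
    out = attention(q, k, v, d_k=8, mask=mask)
    assert out.shape == (2, 4, 5, 8)
    return out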
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (
            x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
def forward(self, x, e_outputs, src_mask, trg_mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, src_mask))
x2 = self.norm_3(x)
x = x + self.dropout_3(self.ff(x2))
return x
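def _example_decoder_layer_usage():
    # Hedged, illustrative sketch only (not part of the original repo): builds the
    # layer with the hyper-parameters from get_init_inputs() below and feeds it the
    # tensor shapes from get_inputs(). The all-ones masks are an assumption made
    # purely for demonstration, so nothing is actually masked out.
    layer = DecoderLayer(d_model=4, heads=4)
    x = torch.rand(4, 4, 4)
    e_outputs = torch.rand(4, 4, 4)
    src_mask = torch.ones(4, 4, 4)
    trg_mask = torch.ones(4, 4, 4)
    out = layer(x, e_outputs, src_mask, trg_mask)
    assert out.shape == (4, 4, 4)
    return out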
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
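def _norm_reference(alpha, x, bias, eps=1e-06):
    # Hedged eager-mode reference for the fused kernel above, drawn from the Norm
    # module reproduced later in this file (a sketch for orientation, not an
    # authoritative restatement of the generated code): the kernel fuses the
    # per-row mean/std normalisation into a single element-wise pass.
    return alpha * (x - x.mean(dim=-1, keepdim=True)) / (
        x.std(dim=-1, keepdim=True) + eps) + bias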
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp6 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x5, tmp10, xmask)
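def _masked_softmax_reference(scores, mask):
    # Hedged eager-mode reference for the two fused kernels above, drawn from the
    # attention() helper reproduced later in this file (a sketch, not a definitive
    # restatement): kernel 3 computes the per-row max and sum of exponentials and
    # kernel 4 normalises in place, i.e. a numerically stable masked softmax. The
    # 1/sqrt(d_k) scale shows up as the literal 1.0 because d_k == 1 here.
    scores = scores.masked_fill(mask == 0, -1000000000.0)
    return F.softmax(scores, dim=-1)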
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mean_std_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(in_out_ptr0 + x0, tmp29, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_9(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
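def _bias_relu_reference(xw, bias):
    # Hedged eager-mode reference for the kernel above (a sketch based on the
    # FeedForward module reproduced later in this file): it adds the linear_1 bias
    # to the preceding matmul result, applies ReLU, and keeps the `out <= 0` mask
    # that threshold_backward uses to zero gradients.
    out = F.relu(xw + bias)
    grad_mask = out <= 0
    return out, grad_mask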
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_18, (4, 4), (4, 1))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4, 4), (4, 1))
assert_size_stride(primals_21, (4,), (1,))
assert_size_stride(primals_22, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_23, (4, 4), (4, 1))
assert_size_stride(primals_24, (4,), (1,))
assert_size_stride(primals_25, (4,), (1,))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (2048, 4), (4, 1))
assert_size_stride(primals_28, (2048,), (1,))
assert_size_stride(primals_29, (4, 2048), (2048, 1))
assert_size_stride(primals_30, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_1,
primals_2, primals_3, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_7, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf5 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf2
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_10, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(64)](buf7, buf6,
buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_div_masked_fill_4[grid(256)](buf10, buf7,
buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_1[grid(16, 4)](buf3, primals_9, buf11, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_5[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_12, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_12
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = buf16
del buf16
triton_poi_fused_add_mean_std_6[grid(16)](buf17, primals_2, buf14,
buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf18)
del primals_15
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_13,
primals_2, buf14, buf15, buf17, primals_14, buf19, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf15
del buf17
del primals_14
buf20 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf19, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_18, (4, 4), (1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_17, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_20, (4, 4), (1, 4), 0), out=buf21)
del primals_20
buf22 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf20, primals_19, buf22, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_19
buf23 = reinterpret_tensor(buf20, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf20
triton_poi_fused_clone_1[grid(16, 4)](buf18, primals_16, buf23, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_16
buf24 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf22, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf23, (16, 1, 4), (4, 0, 1), 0), out=buf24)
buf25 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_22, buf25, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_22
buf26 = reinterpret_tensor(buf18, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf18
buf27 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(64)](buf25, buf24,
buf26, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf28 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf24
triton_poi_fused__softmax_div_masked_fill_4[grid(256)](buf28, buf25,
buf26, buf27, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf29 = reinterpret_tensor(buf27, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf27
triton_poi_fused_clone_1[grid(16, 4)](buf21, primals_21, buf29, 16,
4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_21
buf30 = reinterpret_tensor(buf21, (16, 4, 1), (4, 1, 1), 0)
del buf21
extern_kernels.bmm(reinterpret_tensor(buf28, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf29, (16, 4, 1), (4, 1, 0), 0), out=buf30)
buf31 = reinterpret_tensor(buf26, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf26
triton_poi_fused_clone_5[grid(16, 4)](buf30, buf31, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf32 = reinterpret_tensor(buf30, (16, 4), (4, 1), 0)
del buf30
extern_kernels.mm(reinterpret_tensor(buf31, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_23, (4, 4), (1, 4), 0), out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4), (16, 4, 1), 0)
del buf32
triton_poi_fused_add_8[grid(64)](buf33, primals_2, buf14,
primals_24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_24
buf34 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_25,
buf33, primals_26, buf34, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_26
buf35 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf34, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_27, (4, 2048), (1, 4), 0), out=buf35)
buf36 = reinterpret_tensor(buf35, (4, 4, 2048), (8192, 2048, 1), 0)
del buf35
buf39 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_9[grid(32768)](buf36,
primals_28, buf39, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_28
buf37 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf36, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_29, (2048, 4), (1, 2048), 0),
out=buf37)
buf38 = reinterpret_tensor(buf37, (4, 4, 4), (16, 4, 1), 0)
del buf37
triton_poi_fused_add_10[grid(64)](buf38, buf33, primals_30, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_30
return buf38, primals_2, primals_13, primals_25, reinterpret_tensor(buf0,
(16, 4), (4, 1), 0), buf7, buf10, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), buf14, reinterpret_tensor(primals_17, (16, 4), (4, 1), 0
), reinterpret_tensor(buf19, (16, 4), (4, 1), 0
), buf25, buf28, reinterpret_tensor(buf31, (16, 4), (4, 1), 0
), buf33, reinterpret_tensor(buf34, (16, 4), (4, 1), 0
), reinterpret_tensor(buf36, (16, 2048), (2048, 1), 0
), primals_29, buf39, primals_27, primals_23, reinterpret_tensor(buf29,
(16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf22, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf23, (16, 4, 1), (4, 1, 4), 0
), primals_18, primals_11, reinterpret_tensor(buf11, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (
            x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class DecoderLayerNew(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.norm_1.alpha
primals_3 = self.norm_1.bias
primals_5 = self.norm_2.alpha
primals_7 = self.norm_2.bias
primals_9 = self.norm_3.alpha
primals_12 = self.norm_3.bias
primals_4 = self.attn_1.q_linear.weight
primals_13 = self.attn_1.q_linear.bias
primals_6 = self.attn_1.v_linear.weight
primals_14 = self.attn_1.v_linear.bias
primals_8 = self.attn_1.k_linear.weight
primals_16 = self.attn_1.k_linear.bias
primals_11 = self.attn_1.out.weight
primals_19 = self.attn_1.out.bias
primals_15 = self.attn_2.q_linear.weight
primals_21 = self.attn_2.q_linear.bias
primals_18 = self.attn_2.v_linear.weight
primals_24 = self.attn_2.v_linear.bias
primals_20 = self.attn_2.k_linear.weight
primals_25 = self.attn_2.k_linear.bias
primals_23 = self.attn_2.out.weight
primals_26 = self.attn_2.out.bias
primals_27 = self.ff.linear_1.weight
primals_28 = self.ff.linear_1.bias
primals_29 = self.ff.linear_2.weight
primals_30 = self.ff.linear_2.bias
primals_2 = input_0
primals_10 = input_1
primals_17 = input_2
primals_22 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30])
return output[0]
| congson1293/Transformer | DecoderLayer | false | 1,759 | [
"Apache-2.0"
] | 0 | 249638f3287e0ed11c71496178fe2ceac2d758df | https://github.com/congson1293/Transformer/tree/249638f3287e0ed11c71496178fe2ceac2d758df | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (
            x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class Model(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
def forward(self, x, e_outputs, src_mask, trg_mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, src_mask))
x2 = self.norm_3(x)
x = x + self.dropout_3(self.ff(x2))
return x
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]),
        torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DenseSAGEConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ia/ciadpx4ik7ocmsqscox73ya2qxek7ekkw25w6rijleysqp7lfliw.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3a/c3ajks6gn2yuzdnbqm5gdhl6nslpowgffuevr4gjguhn4q6d6v4b.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = 1.0
tl.store(out_ptr0 + ((5*x0) + (16*x1)), tmp0, xmask)
''', device_str='cuda')
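def _add_self_loops_reference(adj):
    # Hedged eager-mode reference for the two kernels above, following the
    # index_put graph fragments in their comments (a sketch, not the original
    # DenseSAGEConv source): kernel 0 copies the adjacency tensor and kernel 1
    # writes 1.0 along its diagonal, i.e. the add_loop behaviour adj[:, i, i] = 1.
    idx = torch.arange(adj.size(1), device=adj.device)
    adj = adj.clone()
    adj[:, idx, idx] = 1.0
    return adj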
# kernel path: runs/run_shard_6/inductor_cache/y3/cy35gbcd6bgnd5eyw3uhjdcrbocdx6nthwpvgxcbrze5uqo6n2dp.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/f7/cf7frvp5h5tfdj7h2wqm2oydwprwzs74er32f2mngowdmgfjqre4.py
# Topologically Sorted Source Nodes: [sum_1, clamp, out_1], Original ATen: [aten.sum, aten.clamp, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# out_1 => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%index_put, [-1], True), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_sum_3 = async_compile.triton('triton_poi_fused_clamp_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_sum_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 1.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp0 / tmp9
tl.store(in_out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
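def _mean_aggregate_reference(adj_with_loops, x):
    # Hedged eager-mode reference for the bmm + kernel above, following the
    # sum/clamp/div graph fragment in its comment (a sketch under the assumption
    # that adj_with_loops is (B, N, N) and x is (B, N, F)): neighbourhood features
    # are summed and divided by the node degree clamped to at least 1.
    out = torch.matmul(adj_with_loops, x)
    return out / adj_with_loops.sum(dim=-1, keepdim=True).clamp(min=1)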
# kernel path: runs/run_shard_6/inductor_cache/5a/c5ainvzw5uinosmtjpfsqml2wdleittg5twoainmhbtwdqayr4sw.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(buf0, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf0, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
del primals_1
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [sum_1, clamp, out_1], Original ATen: [aten.sum, aten.clamp, aten.div]
triton_poi_fused_clamp_div_sum_3.run(buf4, buf0, 256, grid=grid(256), stream=stream0)
del buf0
buf5 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_3, out=buf5)
del primals_3
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf6, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf6, reinterpret_tensor(buf4, (4, 64), (1, 4), 0), )
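# Note on call(): the sequence above mirrors the eager forward with the default
# arguments (add_loop=True, mask=None, normalize=False): copy adj, write 1.0 on its
# diagonal, broadcast it against x, compute adj @ x, divide by the clamped row sums,
# apply the weight, and add the bias. A transposed view of buf4 is returned alongside
# the output.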
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
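# Worked example: uniform(4, t) gives bound = 1 / sqrt(4) = 0.5, so t is refilled in
# place from U(-0.5, 0.5); with tensor=None the call is a no-op.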
class DenseSAGEConv(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.SAGEConv`.
"""
def __init__(self, in_channels, out_channels, normalize=False, bias=True):
super(DenseSAGEConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
add_loop (bool, optional): If set to :obj:`False`, the layer will
not automatically add self-loops to the adjacency matrices.
(default: :obj:`True`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1
out = torch.matmul(adj, x)
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
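# Minimal smoke test (an illustrative addition, not part of the original module; it
# reuses the helper functions above and only checks the output shape):
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    conv = DenseSAGEConv(*init_args, **init_kwargs)
    x, adj = get_inputs()
    out = conv(x, adj)
    print(out.shape)  # torch.Size([4, 4, 4, 4])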
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = 1.0
tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask)
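# Note on the kernel above: offset 5 * x0 + 16 * x1 addresses element (x1, x0, x0) of
# the (4, 4, 4) adjacency copy, so writing 1.0 there is the self-loop step
# adj[:, idx, idx] = 1 from the eager forward.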
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clamp_div_sum_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 1.0
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = tmp0 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf0, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
del primals_1
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_clamp_div_sum_3[grid(256)](buf4, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del buf0
buf5 = reinterpret_tensor(buf2, (64, 4), (4, 1), 0)
del buf2
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
primals_3, out=buf5)
del primals_3
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
return buf6, reinterpret_tensor(buf4, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseSAGEConvNew(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.SAGEConv`.
"""
def __init__(self, in_channels, out_channels, normalize=False, bias=True):
super(DenseSAGEConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| cshjin/pytorch_geometric | DenseSAGEConv | false | 1,760 | [
"MIT"
] | 0 | 8dd0e76beb72135949a275edd851f80f7b97648f | https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f | import math
import torch
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class Model(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.SAGEConv`.
"""
def __init__(self, in_channels, out_channels, normalize=False, bias=True):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.normalize = normalize
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
add_loop (bool, optional): If set to :obj:`False`, the layer will
not automatically add self-loops to the adjacency matrices.
(default: :obj:`True`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1
out = torch.matmul(adj, x)
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.normalize:
out = F.normalize(out, p=2, dim=-1)
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LinearExcitability | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_2), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )
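# Note on call(): the traced module used the defaults (excitability=False,
# excit_buffer=False), so the excitability multiply is absent from the graph and the
# computation reduces to output = input @ weight.t() + bias.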
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
def linearExcitability(input, weight, excitability=None, bias=None):
"""Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.
Shape:
- input: :math:`(N, *, in_features)`
- weight: :math:`(out_features, in_features)`
- excitability: :math:`(out_features)`
- bias: :math:`(out_features)`
- output: :math:`(N, *, out_features)`
(NOTE: `*` means any number of additional dimensions)"""
if excitability is not None:
output = torch.matmul(input, weight.t()) * excitability
else:
output = torch.matmul(input, weight.t())
if bias is not None:
output = output + bias
return output
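# Shape sketch (annotation; names follow the docstring above):
#   input (N, *, in_features) @ weight.t() (in_features, out_features)
#   -> output (N, *, out_features); excitability and bias, when provided, broadcast
#   over the trailing out_features dimension.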
class LinearExcitability(nn.Module):
"""Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)
Shape:
- input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
- output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
excitability: the learnable multiplication terms (out_features)
bias: the learnable bias of the module of shape (out_features)
excit_buffer: fixed multiplication variable (out_features)"""
def __init__(self, in_features, out_features, bias=True, excitability=
False, excit_buffer=False):
super(LinearExcitability, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if excitability:
self.excitability = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('excitability', None)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if excit_buffer:
buffer = torch.ones(out_features)
self.register_buffer('excit_buffer', buffer)
else:
self.register_buffer('excit_buffer', None)
self.reset_parameters()
def reset_parameters(self):
"""Modifies the parameters to initialize / reset them at appropriate values."""
"""Avoid using in-place init methods"""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight = Parameter(torch.distributions.Uniform(-stdv, stdv).
sample(self.weight.size()))
if self.excitability is not None:
self.excitability = Parameter(torch.ones_like(self.excitability))
if self.bias is not None:
self.bias = Parameter(torch.distributions.Uniform(-stdv, stdv).
sample(self.bias.size()))
def forward(self, input):
"""Running this model's forward step requires/returns:
-[input]: [batch_size]x[...]x[in_features]
-[output]: [batch_size]x[...]x[hidden_features]"""
if self.excit_buffer is None:
excitability = self.excitability
elif self.excitability is None:
excitability = self.excit_buffer
else:
excitability = self.excitability * self.excit_buffer
return linearExcitability(input, self.weight, excitability, self.bias)
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
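# Minimal smoke test (an illustrative addition, not part of the original module; it
# reuses the helper functions above and only checks the output shape):
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    layer = LinearExcitability(*init_args, **init_kwargs)
    x, = get_inputs()
    print(layer(x).shape)  # torch.Size([4, 4, 4, 4])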
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)
def linearExcitability(input, weight, excitability=None, bias=None):
"""Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.
Shape:
- input: :math:`(N, *, in_features)`
- weight: :math:`(out_features, in_features)`
- excitability: :math:`(out_features)`
- bias: :math:`(out_features)`
- output: :math:`(N, *, out_features)`
(NOTE: `*` means any number of additional dimensions)"""
if excitability is not None:
output = torch.matmul(input, weight.t()) * excitability
else:
output = torch.matmul(input, weight.t())
if bias is not None:
output = output + bias
return output
class LinearExcitabilityNew(nn.Module):
"""Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)
Shape:
- input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
- output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
excitability: the learnable multiplication terms (out_features)
bias: the learnable bias of the module of shape (out_features)
excit_buffer: fixed multiplication variable (out_features)"""
def __init__(self, in_features, out_features, bias=True, excitability=
False, excit_buffer=False):
super(LinearExcitabilityNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if excitability:
self.excitability = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('excitability', None)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if excit_buffer:
buffer = torch.ones(out_features)
self.register_buffer('excit_buffer', buffer)
else:
self.register_buffer('excit_buffer', None)
self.reset_parameters()
def reset_parameters(self):
"""Modifies the parameters to initialize / reset them at appropriate values."""
"""Avoid using in-place init methods"""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight = Parameter(torch.distributions.Uniform(-stdv, stdv).
sample(self.weight.size()))
if self.excitability is not None:
self.excitability = Parameter(torch.ones_like(self.excitability))
if self.bias is not None:
self.bias = Parameter(torch.distributions.Uniform(-stdv, stdv).
sample(self.bias.size()))
def __repr__(self):
return self.__class__.__name__ + '(' + 'in_features=' + str(self.
in_features) + ', out_features=' + str(self.out_features) + ')'
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| cuongpxu/continual-learning | LinearExcitability | false | 1,761 | [
"MIT"
] | 0 | 0f799ddc0efe7e6df7038d2e97303add8d5e01fd | https://github.com/cuongpxu/continual-learning/tree/0f799ddc0efe7e6df7038d2e97303add8d5e01fd | import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
def linearExcitability(input, weight, excitability=None, bias=None):
"""Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.
Shape:
- input: :math:`(N, *, in_features)`
- weight: :math:`(out_features, in_features)`
- excitability: :math:`(out_features)`
- bias: :math:`(out_features)`
- output: :math:`(N, *, out_features)`
(NOTE: `*` means any number of additional dimensions)"""
if excitability is not None:
output = torch.matmul(input, weight.t()) * excitability
else:
output = torch.matmul(input, weight.t())
if bias is not None:
output = output + bias
return output
class Model(nn.Module):
"""Module for a linear transformation with multiplicative excitability-parameter (i.e., learnable) and/or -buffer.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
excitability: if 'True', layer will learn a multiplicative excitability-parameter (DEFAULT=False)
excit_buffer: if 'True', layer will have excitability-buffer whose value can be set (DEFAULT=False)
Shape:
- input: :math:`(N, *, in_features)` where `*` means any number of additional dimensions
- output: :math:`(N, *, out_features)` where all but the last dimension are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
excitability: the learnable multiplication terms (out_features)
bias: the learnable bias of the module of shape (out_features)
excit_buffer: fixed multiplication variable (out_features)"""
def __init__(self, in_features, out_features, bias=True, excitability=
False, excit_buffer=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if excitability:
self.excitability = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('excitability', None)
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
if excit_buffer:
buffer = torch.ones(out_features)
self.register_buffer('excit_buffer', buffer)
else:
self.register_buffer('excit_buffer', None)
self.reset_parameters()
def reset_parameters(self):
"""Modifies the parameters to initialize / reset them at appropriate values."""
"""Avoid using in-place init methods"""
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight = Parameter(torch.distributions.Uniform(-stdv, stdv).
sample(self.weight.size()))
if self.excitability is not None:
self.excitability = Parameter(torch.ones_like(self.excitability))
if self.bias is not None:
self.bias = Parameter(torch.distributions.Uniform(-stdv, stdv).
sample(self.bias.size()))
def forward(self, input):
"""Running this model's forward step requires/returns:
-[input]: [batch_size]x[...]x[in_features]
-[output]: [batch_size]x[...]x[hidden_features]"""
if self.excit_buffer is None:
excitability = self.excitability
elif self.excitability is None:
excitability = self.excit_buffer
else:
excitability = self.excitability * self.excit_buffer
return linearExcitability(input, self.weight, excitability, self.bias)
def __repr__(self):
return self.__class__.__name__ + '(' +
# ... truncated (>4000 chars) for memory efficiency |
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xx/cxxsy7adcakcv5c6imnaqsvf3ebhgbph77vaoembzakbqdrjycbc.py
# Topologically Sorted Source Nodes: [mean, sub, mul, std, add, truediv, norm], Original ATen: [aten.mean, aten.sub, aten.mul, aten.std, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mul => mul
# norm => add_1
# std => sqrt, var
# sub => sub
# truediv => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_2, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sub), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_2, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
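# Note on the kernel above: per row of 4 features it evaluates the layer-norm style
# expression from the graph fragment, out = scale * (x - mean) / (std + 1e-06) + shift,
# where std is the unbiased estimate (squared deviations divided by 3 = N - 1);
# in_ptr0 and in_ptr2 hold the 4-element scale and shift parameters.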
# kernel path: runs/run_shard_6/inductor_cache/mh/cmhet4vfl4jlxtge4zzaaa2nugvxpr5f4ge7rs72qwe2aow7vxwy.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
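# Note on the kernel above: it fuses the linear projection's bias add (in_ptr1) with
# the transposed copy that lays the tensor out contiguously for the batched attention
# matmul that follows.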
# kernel path: runs/run_shard_6/inductor_cache/pv/cpvxifzinnau3xu6vyqheclzr6mcqs5g6lyequacfqmznapjidec.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%unsqueeze, 0), kwargs = {})
triton_poi_fused_eq_2 = async_compile.triton('triton_poi_fused_eq_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
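# Note on the kernel above: it materializes the boolean padding mask (mask == 0) once;
# both softmax kernels below reuse it to decide which scores to fill with -1e9.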
# kernel path: runs/run_shard_6/inductor_cache/2i/c2i57js6jak6bynwgyueladupcbb2qpciiu6btv5dj64tjdpelzi.py
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# scores => div_1
# scores_1 => full_default, where
# scores_2 => amax, exp, sub_1, sum_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x3), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x3), tmp20, xmask)
tl.store(out_ptr1 + (x3), tmp31, xmask)
''', device_str='cuda')
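# Note on the kernel above: this is the reduction pass of a numerically stable masked
# softmax. For each row of 4 scores it applies the 1.0 scaling, replaces masked
# positions with -1e9, then stores the row maximum (out_ptr0) and the sum of
# exp(score - max) (out_ptr1) for the normalization kernel that follows.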
# kernel path: runs/run_shard_6/inductor_cache/rw/crwno6mlbzllxkax3dzrhb3xva3g3yd52nkztijmiyd6omrsilpe.py
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# scores => div_1
# scores_1 => full_default, where
# scores_2 => amax, div_2, exp, sub_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_4 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 64)
x4 = xindex % 16
x5 = xindex
x6 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*x3)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x5), xmask)
tmp6 = tl.load(in_ptr1 + (x6), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
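# Note on the kernel above: it finishes the softmax by recomputing the masked, scaled
# score, subtracting the precomputed row maximum (in_ptr1), exponentiating, and
# dividing by the precomputed row sum (in_ptr2), overwriting the scores in place with
# the attention weights.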
# kernel path: runs/run_shard_6/inductor_cache/we/cwe54p4p4jvwbdktkpj3wy2coheu6f3r3dgvi7ozm7xjfk4mgbwx.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
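# Note on the kernel above: it copies the permuted per-head attention output back into
# contiguous layout (the usual transpose + contiguous step of multi-head attention) so
# it can be reshaped for the next matmul.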
# kernel path: runs/run_shard_6/inductor_cache/ez/cezumc4bfxcfwb2v57uxko3oual5p4k5atbughboziniozdii3kw.py
# Topologically Sorted Source Nodes: [x, mean_1, std_1], Original ATen: [aten.add, aten.mean, aten.std]
# Source node to ATen node mapping:
# mean_1 => mean_1
# std_1 => var_1
# x => add_2
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_17), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_2, [-1]), kwargs = {correction: 1.0, keepdim: True})
triton_poi_fused_add_mean_std_6 = async_compile.triton('triton_poi_fused_add_mean_std_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_std_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
''', device_str='cuda')
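# Note on the kernel above: it forms the residual x = input + attention_output element
# by element, then writes the per-row mean (out_ptr0) and the unbiased variance,
# squared deviations divided by 3 (in_out_ptr0), which the next kernel turns into the
# std of the second normalization.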
# kernel path: runs/run_shard_6/inductor_cache/44/c44nsundpysjinc27mkdhx3tcd7nqj5ir3mp6sd7nqx3ryqium46.py
# Topologically Sorted Source Nodes: [x, mean_1, sub_1, mul_1, std_1, add_3, truediv_2, norm_1], Original ATen: [aten.add, aten.mean, aten.sub, aten.mul, aten.std, aten.div]
# Source node to ATen node mapping:
# add_3 => add_3
# mean_1 => mean_1
# mul_1 => mul_1
# norm_1 => add_4
# std_1 => sqrt_1
# sub_1 => sub_2
# truediv_2 => div_3
# x => add_2
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_17), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_2, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_13, %sub_2), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_3, %primals_14), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_7 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/rv/crvf2y3myjmfsbbg2ubhxgcshxxvsclo3cxqhn4x2kt2s5qfl37x.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/x7/cx7y3kua2bihmqz24vhzuizsgkwgtdcyxynklnn7saro6oxogdky.py
# Topologically Sorted Source Nodes: [x, x_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add_2
# x_3 => add_5
# Graph fragment:
# %add_2 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view_17), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {})
triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (2048, 4), (4, 1))
assert_size_stride(primals_16, (2048, ), (1, ))
assert_size_stride(primals_17, (4, 2048), (2048, 1))
assert_size_stride(primals_18, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, mul, std, add, truediv, norm], Original ATen: [aten.mean, aten.sub, aten.mul, aten.std, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_1, primals_2, primals_3, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_7, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf5 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, primals_5, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_2.run(primals_10, buf7, 64, grid=grid(64), stream=stream0)
del primals_10
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_3.run(buf7, buf6, buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [scores, scores_1, scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_4.run(buf10, buf7, buf8, buf9, 256, grid=grid(256), stream=stream0)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf3, primals_9, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_9
buf12 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf12, buf13, 16, 4, grid=grid(16, 4), stream=stream0)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf14)
del primals_12
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x, mean_1, std_1], Original ATen: [aten.add, aten.mean, aten.std]
triton_poi_fused_add_mean_std_6.run(buf17, primals_2, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, mean_1, sub_1, mul_1, std_1, add_3, truediv_2, norm_1], Original ATen: [aten.add, aten.mean, aten.sub, aten.mul, aten.std, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_7.run(primals_13, primals_2, buf14, buf15, buf17, primals_14, buf18, 64, grid=grid(64), stream=stream0)
del buf15
del buf17
del primals_14
buf19 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 2048), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 2048), (8192, 2048, 1), 0); del buf19 # reuse
buf23 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_8.run(buf20, primals_16, buf23, 32768, grid=grid(32768), stream=stream0)
del primals_16
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [x, x_3], Original ATen: [aten.add]
triton_poi_fused_add_9.run(buf22, primals_2, buf14, primals_18, 64, grid=grid(64), stream=stream0)
del primals_18
return (buf22, primals_2, primals_13, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf7, buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0), buf14, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(buf20, (16, 2048), (2048, 1), 0), primals_17, buf23, primals_15, primals_11, reinterpret_tensor(buf11, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(
            dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class EncoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2, x2, x2, mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.ff(x2))
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
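# Fused first Norm: alpha * (x - mean) / (std + 1e-06) + bias over the last
# dim of size 4; the divide-by-3 below is the Bessel-corrected variance that
# torch.std uses by default.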
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
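# Adds a per-feature bias to the projected q/k/v and writes a permuted
# contiguous copy laid out for the per-head batched matmuls.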
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
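# Builds the boolean attention mask (mask == 0) used for the -1e9 masked fill.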
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
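# First softmax pass over the masked scores: per row, computes the max and the
# sum of exponentials (the 1.0 scale is 1/sqrt(d_k) with d_k = 1 here).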
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + 4 * x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask, eviction_policy
='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp31, xmask)
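# Second softmax pass: re-applies the mask, subtracts the row max,
# exponentiates and divides by the stored row sum, normalizing in place.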
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_4(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 64
x4 = xindex % 16
x5 = xindex
x6 = xindex // 4
tmp0 = tl.load(in_ptr0 + (x4 + 16 * x3), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x5, xmask)
tmp6 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = -1000000000.0
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x5, tmp10, xmask)
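# Contiguous transpose of the attention output, i.e. the
# .transpose(1, 2).contiguous() step before the output projection.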
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
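# Residual add (x + attention output) fused with its per-row mean and
# Bessel-corrected variance, feeding the second Norm.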
@triton.jit
def triton_poi_fused_add_mean_std_6(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(in_out_ptr0 + x0, tmp29, xmask)
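# Second Norm on the residual, reusing the mean/variance from the previous
# kernel: scale * (resid - mean) / (sqrt(var) + 1e-06) + shift.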
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
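# Fused bias add + ReLU for the 2048-wide feed-forward hidden layer; also
# stores the (relu <= 0) mask consumed by the backward pass.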
@triton.jit
def triton_poi_fused_relu_threshold_backward_8(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # vestigial all-true mask: xnumel (32768) is a multiple of XBLOCK, so no masking is applied below
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
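# Final residual: (x + attention output) + (feed-forward output + bias).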
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
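# Runs the whole encoder-layer forward: Norm -> multi-head self-attention ->
# residual -> Norm -> feed-forward -> residual. The matmuls are dispatched to
# ATen (mm/bmm/addmm) via extern_kernels; the elementwise work runs in the
# fused Triton kernels above.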
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (2048, 4), (4, 1))
assert_size_stride(primals_16, (2048,), (1,))
assert_size_stride(primals_17, (4, 2048), (2048, 1))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(64)](primals_1,
primals_2, primals_3, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_3
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_7, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_7
buf5 = reinterpret_tensor(buf2, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf2
triton_poi_fused_clone_1[grid(16, 4)](buf1, primals_5, buf5, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_10, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_10
buf8 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_3[grid(64)](buf7, buf6,
buf8, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_div_masked_fill_4[grid(256)](buf10, buf7,
buf8, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_1[grid(16, 4)](buf3, primals_9, buf11, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_9
buf12 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf11, (16, 4, 1), (4, 1, 0), 0), out=buf12)
buf13 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_5[grid(16, 4)](buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf12, (16, 4), (4, 1), 0)
del buf12
extern_kernels.addmm(primals_12, reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf14)
del primals_12
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = buf16
del buf16
triton_poi_fused_add_mean_std_6[grid(16)](buf17, primals_2, buf14,
buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_13,
primals_2, buf14, buf15, buf17, primals_14, buf18, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del buf15
del buf17
del primals_14
buf19 = empty_strided_cuda((16, 2048), (2048, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 2048), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 2048), (8192, 2048, 1), 0)
del buf19
buf23 = empty_strided_cuda((4, 4, 2048), (8192, 2048, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_8[grid(32768)](buf20,
primals_16, buf23, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_16
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 2048), (2048, 1),
0), reinterpret_tensor(primals_17, (2048, 4), (1, 2048), 0),
out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_9[grid(64)](buf22, primals_2, buf14,
primals_18, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
return buf22, primals_2, primals_13, reinterpret_tensor(buf0, (16, 4),
(4, 1), 0), buf7, buf10, reinterpret_tensor(buf13, (16, 4), (4, 1), 0
), buf14, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), reinterpret_tensor(buf20, (16, 2048), (2048, 1), 0
), primals_17, buf23, primals_15, primals_11, reinterpret_tensor(buf11,
(16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4,
1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0
), primals_8, primals_6, primals_4
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(
            dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0, input_1):
primals_1 = self.norm_1.alpha
primals_3 = self.norm_1.bias
primals_5 = self.norm_2.alpha
primals_7 = self.norm_2.bias
primals_4 = self.attn.q_linear.weight
primals_9 = self.attn.q_linear.bias
primals_6 = self.attn.v_linear.weight
primals_12 = self.attn.v_linear.bias
primals_8 = self.attn.k_linear.weight
primals_13 = self.attn.k_linear.bias
primals_11 = self.attn.out.weight
primals_14 = self.attn.out.bias
primals_15 = self.ff.linear_1.weight
primals_16 = self.ff.linear_1.bias
primals_17 = self.ff.linear_2.weight
primals_18 = self.ff.linear_2.bias
primals_2 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
| congson1293/Transformer | EncoderLayer | false | 1,762 | [
"Apache-2.0"
] | 0 | 249638f3287e0ed11c71496178fe2ceac2d758df | https://github.com/congson1293/Transformer/tree/249638f3287e0ed11c71496178fe2ceac2d758df | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
mask = mask.unsqueeze(1)
scores = scores.masked_fill(mask == 0, -1000000000.0)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout=0.1):
super().__init__()
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout=0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
scores = attention(q, k, v, self.d_k, mask, self.dropout)
concat = scores.transpose(1, 2).contiguous().view(bs, -1, self.d_model)
output = self.out(concat)
return output
class Norm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.size = d_model
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
        norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(
            dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class Model(nn.Module):
def __init__(self, d_model, heads, dropout=0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model, dropout=dropout)
self.ff = FeedForward(d_model, dropout=dropout)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2, x2, x2, mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.ff(x2))
return x
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DenseGraphConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3i/c3iibarqokkpdvrjfwijmnifcbse5v3c5vj2oqjzbj6rok54jxf5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/w2/cw2bwqpq3dkexeyqz25khcvdcedkdcrcwpb7zrtd6eayijd5lgez.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_2 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %view_6), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf4, buf3, primals_5, 256, grid=grid(256), stream=stream0)
del buf3
del primals_5
return (buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseGraphConv(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GraphConv`.
"""
def __init__(self, in_channels, out_channels, aggr='add', bias=True):
assert aggr in ['add', 'mean', 'max']
super(DenseGraphConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aggr = aggr
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
self.lin.reset_parameters()
def forward(self, x, adj, mask=None):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
out = torch.matmul(adj, x)
out = torch.matmul(out, self.weight)
if self.aggr == 'mean':
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)
elif self.aggr == 'max':
out = out.max(dim=-1)[0]
out = out + self.lin(x)
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__,
            self.in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
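# Broadcasts x across the extra leading dimension of adj so that
# torch.matmul(adj, x) can run as a plain batched matmul.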
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
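# Fuses the skip connection:
# out = (adj @ x) @ weight + (x @ lin.weight.T + lin.bias).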
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
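# DenseGraphConv forward with aggr='add': neighbor aggregation via bmm, weight
# matmul, then the fused linear skip-connection add above.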
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](primals_2, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
buf2 = reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
del buf0
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf4, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class DenseGraphConvNew(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GraphConv`.
"""
def __init__(self, in_channels, out_channels, aggr='add', bias=True):
assert aggr in ['add', 'mean', 'max']
super(DenseGraphConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aggr = aggr
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
self.lin.reset_parameters()
def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__,
            self.in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.lin.weight
primals_5 = self.lin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| cshjin/pytorch_geometric | DenseGraphConv | false | 1,763 | [
"MIT"
] | 0 | 8dd0e76beb72135949a275edd851f80f7b97648f | https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f | import math
import torch
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
class Model(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GraphConv`.
"""
def __init__(self, in_channels, out_channels, aggr='add', bias=True):
assert aggr in ['add', 'mean', 'max']
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aggr = aggr
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
self.lin = torch.nn.Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
self.lin.reset_parameters()
def forward(self, x, adj, mask=None):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
out = torch.matmul(adj, x)
out = torch.matmul(out, self.weight)
if self.aggr == 'mean':
out = out / adj.sum(dim=-1, keepdim=True).clamp(min=1)
elif self.aggr == 'max':
out = out.max(dim=-1)[0]
out = out + self.lin(x)
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__,
            self.in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BasicConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7k/c7kbxovtq4f3bykwez4rrzyn73bqtbiv6o2f3lrcq5n2yf6b36vs.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/zi/cziljm4x2mwlsrzd6lgu2ol5wiicmcdex3lpg23avfluyxcsxmol.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7x/c7xscjkhfxioj3yqt7zpbp6xho5x5wmzfuayrpvzoij5pp56u75i.py
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu_1 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 6, 6), (36, 36, 6, 1))
assert_size_stride(primals_2, (6, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (6, ), (1, ))
assert_size_stride(primals_4, (120, 96), (96, 1))
assert_size_stride(primals_5, (120, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 4, 4), (96, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 384, grid=grid(384), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.int8)
buf3 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 96, grid=grid(96), stream=stream0)
buf4 = empty_strided_cuda((1, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (1, 96), (0, 1), 0), reinterpret_tensor(primals_4, (96, 120), (1, 96), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((1, 120), (120, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_5, buf6, 120, grid=grid(120), stream=stream0)
del primals_5
return (buf5, primals_1, primals_2, buf1, buf2, reinterpret_tensor(buf3, (1, 96), (96, 1), 0), buf6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 6, 6), (36, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((120, 96), (96, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
def singleton(cls):
obj = cls()
cls.__new__ = staticmethod(lambda cls: obj)
try:
del cls.__init__
except AttributeError:
pass
return cls
@singleton
class model_debugger:
def __init__(self):
self.reset()
def reset(self, filename_suffix=''):
"""
Reset all the variables
Parameters
----------
filename_suffix : str, optional
the filename suffix, by default ""
Returns
-------
model_debugger
returns self
"""
self.model_forward_data = {}
self.active_forwards = {}
self.logs = []
self.time_usage = {}
self.active_timers = {}
self.called_times = {}
self.variable_log = {}
self.tenserboard_writer = SummaryWriter(filename_suffix)
self.epoch_write_counter = {}
return self
def write_to_tensorboard(self, name, value, epoch):
"""
Add value to SummaryWriter
Parameters
----------
name : str
tag name
value : number
numeric value
epoch : epoch
            Current epoch; if None, a built-in counter will be used
"""
if epoch is None:
if name not in self.epoch_write_counter:
self.epoch_write_counter[name] = 0
epoch = self.epoch_write_counter[name]
self.epoch_write_counter[name] += 1
self.tenserboard_writer.add_scalar(name, value, epoch)
def save_tensorboard(self):
self.tenserboard_writer.close()
def get_last_round(self):
"""
Get the latest forward data for model
Returns
-------
        dict_items
            view of (name, forward data) tuples
"""
return self.model_forward_data.items()
def start_forward(self, name):
"""
        Start a forward pass for a component
Parameters
----------
name : str
name of the component
"""
if name not in self.active_forwards:
self.active_forwards[name] = []
def add_element(self, name, element):
"""
        Add an element to the forward path of a given name
Parameters
----------
name : str
name of component
element : tensor/any
the tensor of a given state
"""
self.active_forwards[name].append(element)
def stop_forward(self, name):
"""
Complete a forward
Parameters
----------
name : str
name of the component
"""
self.model_forward_data[name] = self.active_forwards[name]
self.active_forwards[name] = []
def log(self, value):
"""
Log a value to list
Parameters
----------
value : str
log value
"""
self.logs.append(value)
def start_track_time(self, name):
"""
Track time usage of a given component
        - needs to be closed by stop_track_time
Parameters
----------
name : str
name of timer
"""
if name not in self.active_timers:
self.active_timers[name] = []
self.time_usage[name] = 0
self.called_times[name] = 0
self.active_timers[name].append(time.time())
self.called_times[name] += 1
def stop_track_time(self, name):
"""
Stops the timer with a given name
Parameters
----------
name : str
name of timer
"""
time_usage = time.time() - self.active_timers[name].pop(0)
self.time_usage[name] += time_usage
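def example_timer_usage():
    # Editor-added usage sketch (not part of the original repo code): shows the
    # intended start/stop pairing of the timer API documented above; the timer
    # name 'demo' is arbitrary.
    dbg = model_debugger()
    dbg.start_track_time('demo')
    time.sleep(0.01)
    dbg.stop_track_time('demo')
    return dbg.time_usage['demo'], dbg.called_times['demo']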
class shared_backbone:
def __init__(self):
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
self.debugger = model_debugger()
self.main_hidden_layer_size = 64
class BasicConv(nn.Module, shared_backbone):
def __init__(self, output=120):
super().__init__()
shared_backbone.__init__(self)
self.output_size = 96
self.conv1 = nn.Conv2d(1, 6, 3)
self.fc1 = nn.Linear(self.output_size, output)
def forward(self, x):
"""
Basic forward operation on conv layer
Parameters
----------
x : torch.tensor
input data
Returns
-------
torch.tensor
the output after the operations
"""
if len(x.shape) < 4:
padding = (1,) * (4 - len(x.shape))
x = x.reshape(padding + x.shape)
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = x.view(-1, self.output_size)
assert x.shape[0] == 1
assert len(x.shape) == 2
return F.relu(self.fc1(x))
def get_inputs():
return [torch.rand([4, 1, 6, 6])]
def get_init_inputs():
return [[], {}]
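def example_forward():
    # Editor-added usage sketch (not part of the original record): runs the
    # module on the random input produced by get_inputs(); note that the whole
    # 4-sample batch is flattened into a single row of 96 features before fc1.
    net = BasicConv()
    x, = get_inputs()
    return net(x)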
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import time
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy
='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 120
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 6, 6), (36, 36, 6, 1))
assert_size_stride(primals_2, (6, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (6,), (1,))
assert_size_stride(primals_4, (120, 96), (96, 1))
assert_size_stride(primals_5, (120,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 4, 4), (96, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(384)](buf1, primals_3, 384,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.int8)
buf3 = empty_strided_cuda((4, 6, 2, 2), (24, 4, 2, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(96)](buf1, buf2,
buf3, 96, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((1, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (1, 96), (0, 1), 0),
reinterpret_tensor(primals_4, (96, 120), (1, 96), 0), out=buf4)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((1, 120), (120, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(120)](buf5,
primals_5, buf6, 120, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
return buf5, primals_1, primals_2, buf1, buf2, reinterpret_tensor(buf3,
(1, 96), (96, 1), 0), buf6, primals_4
def singleton(cls):
obj = cls()
cls.__new__ = staticmethod(lambda cls: obj)
try:
del cls.__init__
except AttributeError:
pass
return cls
@singleton
class model_debugger:
def __init__(self):
self.reset()
def reset(self, filename_suffix=''):
"""
Reset all the variables
Parameters
----------
filename_suffix : str, optional
the filename suffix, by default ""
Returns
-------
model_debugger
returns self
"""
self.model_forward_data = {}
self.active_forwards = {}
self.logs = []
self.time_usage = {}
self.active_timers = {}
self.called_times = {}
self.variable_log = {}
self.tenserboard_writer = SummaryWriter(filename_suffix)
self.epoch_write_counter = {}
return self
def write_to_tensorboard(self, name, value, epoch):
"""
Add value to SummaryWriter
Parameters
----------
name : str
tag name
value : number
numeric value
epoch : epoch
            Current epoch; if None, a built-in counter will be used
"""
if epoch is None:
if name not in self.epoch_write_counter:
self.epoch_write_counter[name] = 0
epoch = self.epoch_write_counter[name]
self.epoch_write_counter[name] += 1
self.tenserboard_writer.add_scalar(name, value, epoch)
def save_tensorboard(self):
self.tenserboard_writer.close()
def get_last_round(self):
"""
Get the latest forward data for model
Returns
-------
        dict_items
            view of (name, forward data) tuples
"""
return self.model_forward_data.items()
def start_forward(self, name):
"""
        Start a forward pass for a component
Parameters
----------
name : str
name of the component
"""
if name not in self.active_forwards:
self.active_forwards[name] = []
def add_element(self, name, element):
"""
        Add an element to the forward path of a given name
Parameters
----------
name : str
name of component
element : tensor/any
the tensor of a given state
"""
self.active_forwards[name].append(element)
def stop_forward(self, name):
"""
Complete a forward
Parameters
----------
name : str
name of the component
"""
self.model_forward_data[name] = self.active_forwards[name]
self.active_forwards[name] = []
def log(self, value):
"""
Log a value to list
Parameters
----------
value : str
log value
"""
self.logs.append(value)
def start_track_time(self, name):
"""
Track time usage of a given component
        - needs to be closed by stop_track_time
Parameters
----------
name : str
name of timer
"""
if name not in self.active_timers:
self.active_timers[name] = []
self.time_usage[name] = 0
self.called_times[name] = 0
self.active_timers[name].append(time.time())
self.called_times[name] += 1
def stop_track_time(self, name):
"""
Stops the timer with a given name
Parameters
----------
name : str
name of timer
"""
time_usage = time.time() - self.active_timers[name].pop(0)
self.time_usage[name] += time_usage
class shared_backbone:
def __init__(self):
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
self.debugger = model_debugger()
self.main_hidden_layer_size = 64
class BasicConvNew(nn.Module, shared_backbone):
def __init__(self, output=120):
super().__init__()
shared_backbone.__init__(self)
self.output_size = 96
self.conv1 = nn.Conv2d(1, 6, 3)
self.fc1 = nn.Linear(self.output_size, output)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
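def example_forward_cuda():
    # Editor-added usage sketch (not part of the original record): the compiled
    # wrapper runs call() on CUDA, so this assumes a CUDA device and an input
    # with the exact shape asserted in call(), i.e. (4, 1, 6, 6).
    net = BasicConvNew().cuda()
    x = torch.rand(4, 1, 6, 6, device='cuda')
    return net(x)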
| Zikoat/musweeper | BasicConv | false | 1,764 | [
"MIT"
] | 0 | 07e3e5e5e5e4edad4d8b1b6bb05aee2f33f8d9cb | https://github.com/Zikoat/musweeper/tree/07e3e5e5e5e4edad4d8b1b6bb05aee2f33f8d9cb | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
def singleton(cls):
obj = cls()
cls.__new__ = staticmethod(lambda cls: obj)
try:
del cls.__init__
except AttributeError:
pass
return cls
@singleton
class model_debugger:
def __init__(self):
self.reset()
def reset(self, filename_suffix=''):
"""
Reset all the variables
Parameters
----------
filename_suffix : str, optional
the filename suffix, by default ""
Returns
-------
model_debugger
returns self
"""
self.model_forward_data = {}
self.active_forwards = {}
self.logs = []
self.time_usage = {}
self.active_timers = {}
self.called_times = {}
self.variable_log = {}
self.tenserboard_writer = SummaryWriter(filename_suffix)
self.epoch_write_counter = {}
return self
def write_to_tensorboard(self, name, value, epoch):
"""
Add value to SummaryWriter
Parameters
----------
name : str
tag name
value : number
numeric value
epoch : epoch
            Current epoch; if None, a built-in counter will be used
"""
if epoch is None:
if name not in self.epoch_write_counter:
self.epoch_write_counter[name] = 0
epoch = self.epoch_write_counter[name]
self.epoch_write_counter[name] += 1
self.tenserboard_writer.add_scalar(name, value, epoch)
def save_tensorboard(self):
self.tenserboard_writer.close()
def get_last_round(self):
"""
Get the latest forward data for model
Returns
-------
        dict_items
            view of (name, forward data) tuples
"""
return self.model_forward_data.items()
def start_forward(self, name):
"""
        Start a forward pass for a component
Parameters
----------
name : str
name of the component
"""
if name not in self.active_forwards:
self.active_forwards[name] = []
def add_element(self, name, element):
"""
        Add an element to the forward path of a given name
Parameters
----------
name : str
name of component
element : tensor/any
the tensor of a given state
"""
self.active_forwards[name].append(element)
def stop_forward(self, name):
"""
Complete a forward
Parameters
----------
name : str
name of the component
"""
self.model_forward_data[name] = self.active_forwards[name]
self.active_forwards[name] = []
def log(self, value):
"""
Log a value to list
Parameters
----------
value : str
log value
"""
self.logs.append(value)
def start_track_time(self, name):
"""
Track time usage of a given component
        - needs to be closed by stop_track_time
Parameters
----------
name : str
name of timer
"""
if name not in self.active_timers:
self.active_timers[name] = []
self.time_usage[name] = 0
self.called_times[name] = 0
self.active_timers[name].append(time.time())
self.called_times[name] += 1
def stop_track_time(self, name):
"""
Stops the timer with a given name
Parameters
----------
name : str
name of timer
"""
time_usage = time.time() - self.active_timers[name].pop(0)
self.time_usage[name] += time_usage
class shared_backbone:
def __init__(self):
self.device = torch.device('cuda' if torch.cuda.is_available() else
'cpu')
self.debugger = model_debugger()
self.main_hidden_layer_size = 64
class Model(nn.Module, shared_backbone):
def __init__(self, output=120):
super().__init__()
shared_backbone.__init__(self)
self.output_size = 96
self.conv1 = nn.Conv2d(1, 6, 3)
self.fc1 = nn.Linear(self.output_size, output)
def forward(self, x):
"""
Basic forward operation on conv layer
Parameters
# ... truncated (>4000 chars) for memory efficiency |
DenseGCNConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ia/ciadpx4ik7ocmsqscox73ya2qxek7ekkw25w6rijleysqp7lfliw.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3a/c3ajks6gn2yuzdnbqm5gdhl6nslpowgffuevr4gjguhn4q6d6v4b.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=2] = call_function[target=torch.ops.aten.index_put.default](args = (%primals_2, [None, %iota, %iota], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = 1.0
tl.store(out_ptr0 + ((5*x0) + (16*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/by/cbyfwvqabdnuhk62vp6oux4o4uoccm6mz5rjytdtikttvdu3mbex.py
# Topologically Sorted Source Nodes: [mul, adj_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# adj_1 => mul_1
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %index_put), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (x4), xmask)
tmp13 = tl.load(in_ptr0 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = -0.5
tmp10 = libdevice.pow(tmp8, tmp9)
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp19, tmp7)
tmp21 = libdevice.pow(tmp20, tmp9)
tmp22 = tmp12 * tmp21
tl.store(out_ptr0 + (x4), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kl/ckllf6mazqhprwh2tau7vxrdurc2kivba4tykq3gqxvebmdet63s.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5a/c5ainvzw5uinosmtjpfsqml2wdleittg5twoainmhbtwdqayr4sw.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_2 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(buf0, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, adj_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf0, buf3, 64, grid=grid(64), stream=stream0)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf5)
del buf2
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.add]
triton_poi_fused_add_4.run(buf6, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf6, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch.nn import Parameter
import torch.utils.data
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
class DenseGCNConv(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GCNConv`.
"""
def __init__(self, in_channels, out_channels, improved=False, bias=True):
super(DenseGCNConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
add_loop (bool, optional): If set to :obj:`False`, the layer will
not automatically add self-loops to the adjacency matrices.
(default: :obj:`True`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1 if not self.improved else 2
out = torch.matmul(x, self.weight)
deg_inv_sqrt = adj.sum(dim=-1).clamp(min=1).pow(-0.5)
adj = deg_inv_sqrt.unsqueeze(-1) * adj * deg_inv_sqrt.unsqueeze(-2)
out = torch.matmul(adj, out)
if self.bias is not None:
out = out + self.bias
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
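def example_forward():
    # Editor-added usage sketch (not part of the original record): the layer
    # effectively applies D^-1/2 (A + I) D^-1/2 X W (degrees clamped to a
    # minimum of 1) plus bias; the helpers above provide matching random inputs.
    init_args, init_kwargs = get_init_inputs()
    model = DenseGCNConv(*init_args, **init_kwargs)
    x, adj = get_inputs()
    return model(x, adj)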
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
tmp0 = 1.0
tl.store(out_ptr0 + (5 * x0 + 16 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + x4, xmask)
tmp13 = tl.load(in_ptr0 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = -0.5
tmp10 = libdevice.pow(tmp8, tmp9)
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp17 = tmp15 + tmp16
tmp19 = tmp17 + tmp18
tmp20 = triton_helpers.maximum(tmp19, tmp7)
tmp21 = libdevice.pow(tmp20, tmp9)
tmp22 = tmp12 * tmp21
tl.store(out_ptr0 + x4, tmp22, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(64)](primals_2, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
triton_poi_fused_index_put_lift_fresh_1[grid(16)](buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_mul_2[grid(64)](buf0, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(256)](buf3, buf4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf3
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf5)
del buf2
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_4[grid(256)](buf6, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
return buf6, reinterpret_tensor(buf4, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
class DenseGCNConvNew(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GCNConv`.
"""
def __init__(self, in_channels, out_channels, improved=False, bias=True):
super(DenseGCNConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
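def example_forward_cuda():
    # Editor-added usage sketch (not part of the original record): assumes a
    # CUDA device; input shapes/strides must match the assert_size_stride
    # checks in call() above.
    model = DenseGCNConvNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    adj = torch.rand(4, 4, 4, device='cuda')
    return model(x, adj)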
| cshjin/pytorch_geometric | DenseGCNConv | false | 1,765 | [
"MIT"
] | 0 | 8dd0e76beb72135949a275edd851f80f7b97648f | https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f | import math
import torch
from torch.nn import Parameter
import torch.utils.data
def glorot(tensor):
if tensor is not None:
stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
tensor.data.uniform_(-stdv, stdv)
def zeros(tensor):
if tensor is not None:
tensor.data.fill_(0)
class Model(torch.nn.Module):
"""See :class:`torch_geometric.nn.conv.GCNConv`.
"""
def __init__(self, in_channels, out_channels, improved=False, bias=True):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
"""
Args:
x (Tensor): Node feature tensor :math:`\\mathbf{X} \\in \\mathbb{R}^{B
\\times N \\times F}`, with batch-size :math:`B`, (maximum)
number of nodes :math:`N` for each graph, and feature
dimension :math:`F`.
adj (Tensor): Adjacency tensor :math:`\\mathbf{A} \\in \\mathbb{R}^{B
\\times N \\times N}`. The adjacency tensor is broadcastable in
the batch dimension, resulting in a shared adjacency matrix for
the complete batch.
mask (BoolTensor, optional): Mask matrix
:math:`\\mathbf{M} \\in {\\{ 0, 1 \\}}^{B \\times N}` indicating
the valid nodes for each graph. (default: :obj:`None`)
add_loop (bool, optional): If set to :obj:`False`, the layer will
not automatically add self-loops to the adjacency matrices.
(default: :obj:`True`)
"""
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1 if not self.improved else 2
out = torch.matmul(x, self.weight)
deg_inv_sqrt = adj.sum(dim=-1).clamp(min=1).pow(-0.5)
adj = deg_inv_sqrt.unsqueeze(-1) * adj * deg_inv_sqrt.unsqueeze(-2)
out = torch.matmul(adj, out)
if self.bias is not None:
out = out + self.bias
if mask is not None:
out = out * mask.view(B, N, 1)
return out
def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
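def example_forward():
    # Editor-added usage sketch (not part of the original record), mirroring the
    # sketch after the reference implementation above: positional init arguments
    # come from get_init_inputs(), random inputs from get_inputs().
    model = Model(*get_init_inputs())
    x, adj = get_inputs()
    return model(x, adj)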
|
InnerProductProbe | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7e/c7e6nde7zpogw2al4p3oaiveec6rvoay2qieuq2h2ihah2vqbmnq.py
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.sub, aten.transpose]
# Source node to ATen node mapping:
# z => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %permute_5), kwargs = {})
# %permute_12 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_4, [0, 2, 1]), kwargs = {})
# %permute_13 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_5, [0, 2, 1]), kwargs = {})
triton_poi_fused_sub_transpose_0 = async_compile.triton('triton_poi_fused_sub_transpose_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_transpose_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sub_transpose_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y5 = (yindex // 4)
y0 = yindex % 4
y2 = (yindex // 16)
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y5 + (16*x3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + (4*y2) + (16*x3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + (x3 + (4*y4)), tmp2, xmask & ymask)
tl.store(out_ptr1 + (y4 + (64*x3)), tmp2, xmask & ymask)
tl.store(out_ptr2 + (y4 + (64*x3)), tmp2, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 4, 16), (0, 1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((64, 4, 1), (1, 64, 256), torch.float32)
buf4 = empty_strided_cuda((64, 1, 4), (1, 256, 64), torch.float32)
# Topologically Sorted Source Nodes: [z], Original ATen: [aten.sub, aten.transpose]
stream0 = get_raw_stream(0)
triton_poi_fused_sub_transpose_0.run(buf0, buf1, buf3, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
buf2 = reinterpret_tensor(buf0, (64, 1, 1), (1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (64, 1, 4), (4, 0, 1), 0), reinterpret_tensor(buf1, (64, 4, 1), (4, 1, 0), 0), out=buf2)
del buf1
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), buf3, buf4, reinterpret_tensor(primals_1, (1, 16, 4), (4, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class InnerProductProbe(nn.Module):
def __init__(self, length: 'int', max_rank: 'int'=None):
super().__init__()
self.length = length
if max_rank is None:
max_rank = length
self.b = nn.Parameter(torch.empty(max_rank, length, dtype=torch.
float32).uniform_(-0.05, 0.05), requires_grad=True)
def forward(self, x):
seq_len = x.size(1)
x = torch.einsum('gh,bih->big', self.b, x)
x = x.unsqueeze(2).expand(-1, -1, seq_len, -1)
y = x.clone().permute(0, 2, 1, 3)
z = x - y
return torch.einsum('bijg,bijg->bij', z, z)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'length': 4}]
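# Hedged usage sketch (not part of the original module): the probe returns the
# squared distance ||B(h_i - h_j)||^2 between every pair of token vectors after
# projecting them with the learned matrix B. The check below compares the
# einsum-based forward against a direct pairwise computation; shapes are
# illustrative.
def _probe_distance_check_sketch():
    probe = InnerProductProbe(length=4)
    h = torch.rand(2, 5, 4)          # (batch, seq_len, hidden)
    dist = probe(h)                  # (batch, seq_len, seq_len)
    proj = torch.einsum('gh,bih->big', probe.b, h)
    ref = (proj.unsqueeze(2) - proj.unsqueeze(1)).pow(2).sum(-1)
    assert torch.allclose(dist, ref, atol=1e-06)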
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sub_transpose_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y5 = yindex // 4
y0 = yindex % 4
y2 = yindex // 16
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y5 + 16 * x3), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr0 + (y0 + 4 * y2 + 16 * x3), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tl.store(out_ptr0 + (x3 + 4 * y4), tmp2, xmask & ymask)
tl.store(out_ptr1 + (y4 + 64 * x3), tmp2, xmask & ymask)
tl.store(out_ptr2 + (y4 + 64 * x3), tmp2, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_1, (1, 4, 16), (0, 1, 4), 0),
out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((64, 4, 1), (1, 64, 256), torch.float32)
buf4 = empty_strided_cuda((64, 1, 4), (1, 256, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_sub_transpose_0[grid(64, 4)](buf0, buf1, buf3,
buf4, 64, 4, XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (64, 1, 1), (1, 1, 1), 0)
del buf0
extern_kernels.bmm(reinterpret_tensor(buf1, (64, 1, 4), (4, 0, 1),
0), reinterpret_tensor(buf1, (64, 4, 1), (4, 1, 0), 0), out=buf2)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0
), buf3, buf4, reinterpret_tensor(primals_1, (1, 16, 4), (4, 4, 1), 0)
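# Hedged note (added for illustration, not generated code): the first bmm in
# `call` realises torch.einsum('gh,bih->big', b, x), which in eager PyTorch is
# just a matmul against the transposed projection, x @ b.t(). A small check:
def _einsum_bmm_equivalence_sketch():
    b = torch.rand(4, 4)
    x = torch.rand(4, 4, 4)
    ref = torch.einsum('gh,bih->big', b, x)
    assert torch.allclose(ref, x @ b.t(), atol=1e-06)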
class InnerProductProbeNew(nn.Module):
def __init__(self, length: 'int', max_rank: 'int'=None):
super().__init__()
self.length = length
if max_rank is None:
max_rank = length
self.b = nn.Parameter(torch.empty(max_rank, length, dtype=torch.
float32).uniform_(-0.05, 0.05), requires_grad=True)
def forward(self, input_0):
primals_2 = self.b
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| daemon/vizbert | InnerProductProbe | false | 1,766 | [
"MIT"
] | 0 | e40b7d1529f8857050313f8d87ff03b1b7226c9e | https://github.com/daemon/vizbert/tree/e40b7d1529f8857050313f8d87ff03b1b7226c9e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, length: 'int', max_rank: 'int'=None):
super().__init__()
self.length = length
if max_rank is None:
max_rank = length
self.b = nn.Parameter(torch.empty(max_rank, length, dtype=torch.
float32).uniform_(-0.05, 0.05), requires_grad=True)
def forward(self, x):
seq_len = x.size(1)
x = torch.einsum('gh,bih->big', self.b, x)
x = x.unsqueeze(2).expand(-1, -1, seq_len, -1)
y = x.clone().permute(0, 2, 1, 3)
z = x - y
return torch.einsum('bijg,bijg->bij', z, z)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dk/cdkfylhbmav6z7nn6vy4nwynu3ry6ilbxpjspabpseiuux7csyql.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_3 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_group_norm_0 = async_compile.triton('triton_poi_fused_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + (x0), tmp13, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kd/ckdswb4lufwiij2wklkypvchblxzpif4ph4uxoqoklvcbijmlski.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_3 => add_1, mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_7), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_4), kwargs = {})
triton_poi_fused_native_group_norm_1 = async_compile.triton('triton_poi_fused_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 4)
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp3 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool3d_with_indices]
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [2, 2, 2], [2, 2, 2])
del primals_1
buf1 = buf0[0]
del buf0
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), primals_2, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_group_norm_0.run(buf3, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_1.run(buf3, buf4, buf5, primals_3, primals_4, buf6, 64, grid=grid(64), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (0, 16, 4, 2, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf8 = buf5; del buf5 # reuse
buf9 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_0.run(buf7, buf8, buf9, 16, grid=grid(16), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.native_group_norm]
triton_poi_fused_native_group_norm_1.run(buf7, buf8, buf9, primals_6, primals_7, buf10, 64, grid=grid(64), stream=stream0)
del buf8
del buf9
del primals_7
return (buf10, primals_2, primals_3, primals_5, primals_6, reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), buf3, reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0), buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 8, 4, 4), (128, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn as nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules that together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
        order (string): order of the layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
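# Hedged usage sketch (illustrative, not part of the original code): the
# `order` string is expanded position by position, so 'crg' yields the list
# [('conv', Conv3d), ('ReLU', ReLU), ('groupnorm', GroupNorm)], and the conv
# bias is dropped automatically whenever a norm layer ('g' or 'b') appears.
def _create_conv_order_sketch():
    layers = create_conv(in_channels=4, out_channels=8, kernel_size=3,
        order='crg', num_groups=8)
    names = [name for name, _ in layers]
    assert names == ['conv', 'ReLU', 'groupnorm']
    assert layers[0][1].bias is None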
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (Conv3d+ReLU+GroupNorm3d) by default.
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, encoder, kernel_size=3,
order='crg', num_groups=8):
super(DoubleConv, self).__init__()
if encoder:
conv1_in_channels = in_channels
conv1_out_channels = out_channels // 2
if conv1_out_channels < in_channels:
conv1_out_channels = in_channels
conv2_in_channels, conv2_out_channels = (conv1_out_channels,
out_channels)
else:
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels
self.add_module('SingleConv1', SingleConv(conv1_in_channels,
conv1_out_channels, kernel_size, order, num_groups))
self.add_module('SingleConv2', SingleConv(conv2_in_channels,
conv2_out_channels, kernel_size, order, num_groups))
class Encoder(nn.Module):
"""
    A single module from the encoder path consisting of the optional max
    pooling layer (one may specify a MaxPool kernel_size different from the
    standard (2,2,2), e.g. if the volumetric data is anisotropic; make sure to
    use a complementary scale_factor in the decoder path), followed by a
    DoubleConv module.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
conv_kernel_size (int): size of the convolving kernel
apply_pooling (bool): if True use MaxPool3d before DoubleConv
pool_kernel_size (tuple): the size of the window to take a max over
pool_type (str): pooling layer: 'max' or 'avg'
basic_module(nn.Module): either ResNetBlock or DoubleConv
conv_layer_order (string): determines the order of layers
in `DoubleConv` module. See `DoubleConv` for more info.
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, conv_kernel_size=3,
apply_pooling=True, pool_kernel_size=(2, 2, 2), pool_type='max',
basic_module=DoubleConv, conv_layer_order='crg', num_groups=8):
super(Encoder, self).__init__()
assert pool_type in ['max', 'avg']
if apply_pooling:
if pool_type == 'max':
self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = None
self.basic_module = basic_module(in_channels, out_channels, encoder
=True, kernel_size=conv_kernel_size, order=conv_layer_order,
num_groups=num_groups)
def forward(self, x):
if self.pooling is not None:
x = self.pooling(x)
x = self.basic_module(x)
return x
def get_inputs():
return [torch.rand([4, 8, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
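# Hedged usage sketch (not part of the original code): with pooling enabled the
# encoder first shrinks the spatial dimensions with MaxPool3d and then applies
# the two padded convolutions, so channels go in_channels -> out_channels while
# the pooled spatial size is preserved. Shapes below are illustrative.
def _encoder_usage_sketch():
    enc = Encoder(in_channels=4, out_channels=8, num_groups=4,
        pool_kernel_size=(1, 2, 2))
    x = torch.rand(1, 4, 8, 16, 16)        # (N, C, D, H, W)
    y = enc(x)
    assert y.shape == (1, 8, 8, 8, 8)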
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_group_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp1, tmp6)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp1, tmp9)
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tmp2 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp4 - tmp13
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp7 - tmp13
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 - tmp13
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp24 / tmp12
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.rsqrt(tmp27)
tl.store(out_ptr0 + x0, tmp13, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 4
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp3 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 - tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
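# Hedged reference (the per-channel grouping is an inference from the shapes in
# `call` below, not generated code): taken together, the two kernels above
# compute a ReLU followed by a GroupNorm with one channel per group -- mean and
# rsqrt(var + 1e-05) over the four spatial elements of each (sample, channel)
# slice, then the per-channel affine transform. An eager-mode equivalent:
def _relu_group_norm_reference(x, weight, bias, eps=1e-05):
    import torch.nn.functional as F
    return F.group_norm(F.relu(x), num_groups=x.size(1), weight=weight,
        bias=bias, eps=eps)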
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 8, 4, 4), (128, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [2,
2, 2], [2, 2, 2])
del primals_1
buf1 = buf0[0]
del buf0
buf3 = extern_kernels.convolution(reinterpret_tensor(buf1, (1, 4, 4,
2, 2), (64, 16, 4, 2, 1), 0), primals_2, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_group_norm_0[grid(16)](buf3, buf4, buf5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_native_group_norm_1[grid(64)](buf3, buf4, buf5,
primals_3, primals_4, buf6, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4,
2, 2), (0, 16, 4, 2, 1), 0), primals_5, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1))
buf8 = buf5
del buf5
buf9 = buf4
del buf4
triton_poi_fused_native_group_norm_0[grid(16)](buf7, buf8, buf9, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
triton_poi_fused_native_group_norm_1[grid(64)](buf7, buf8, buf9,
primals_6, primals_7, buf10, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf8
del buf9
del primals_7
return (buf10, primals_2, primals_3, primals_5, primals_6,
reinterpret_tensor(buf1, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1), 0),
buf3, reinterpret_tensor(buf6, (1, 4, 4, 2, 2), (64, 16, 4, 2, 1),
0), buf7)
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules that together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
        order (string): order of the layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (Conv3d+ReLU+GroupNorm3d) by default.
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
encoder (bool): if True we're in the encoder path, otherwise we're in the decoder
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, encoder, kernel_size=3,
order='crg', num_groups=8):
super(DoubleConv, self).__init__()
if encoder:
conv1_in_channels = in_channels
conv1_out_channels = out_channels // 2
if conv1_out_channels < in_channels:
conv1_out_channels = in_channels
conv2_in_channels, conv2_out_channels = (conv1_out_channels,
out_channels)
else:
conv1_in_channels, conv1_out_channels = in_channels, out_channels
conv2_in_channels, conv2_out_channels = out_channels, out_channels
self.add_module('SingleConv1', SingleConv(conv1_in_channels,
conv1_out_channels, kernel_size, order, num_groups))
self.add_module('SingleConv2', SingleConv(conv2_in_channels,
conv2_out_channels, kernel_size, order, num_groups))
class EncoderNew(nn.Module):
"""
    A single module from the encoder path consisting of the optional max
    pooling layer (one may specify a MaxPool kernel_size different from the
    standard (2,2,2), e.g. if the volumetric data is anisotropic; make sure to
    use a complementary scale_factor in the decoder path), followed by a
    DoubleConv module.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
conv_kernel_size (int): size of the convolving kernel
apply_pooling (bool): if True use MaxPool3d before DoubleConv
pool_kernel_size (tuple): the size of the window to take a max over
pool_type (str): pooling layer: 'max' or 'avg'
basic_module(nn.Module): either ResNetBlock or DoubleConv
conv_layer_order (string): determines the order of layers
in `DoubleConv` module. See `DoubleConv` for more info.
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, conv_kernel_size=3,
apply_pooling=True, pool_kernel_size=(2, 2, 2), pool_type='max',
basic_module=DoubleConv, conv_layer_order='crg', num_groups=8):
super(EncoderNew, self).__init__()
assert pool_type in ['max', 'avg']
if apply_pooling:
if pool_type == 'max':
self.pooling = nn.MaxPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = nn.AvgPool3d(kernel_size=pool_kernel_size)
else:
self.pooling = None
self.basic_module = basic_module(in_channels, out_channels, encoder
=True, kernel_size=conv_kernel_size, order=conv_layer_order,
num_groups=num_groups)
def forward(self, input_0):
primals_2 = self.basic_module.SingleConv1.conv.weight
primals_3 = self.basic_module.SingleConv1.groupnorm.weight
primals_4 = self.basic_module.SingleConv1.groupnorm.bias
primals_5 = self.basic_module.SingleConv2.conv.weight
primals_6 = self.basic_module.SingleConv2.groupnorm.weight
primals_7 = self.basic_module.SingleConv2.groupnorm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| cviaai/TMJ_tracking | Encoder | false | 1,767 | [
"MIT"
] | 0 | 464ca21dbeb538dc9504bd5d0e5c4d92591e69c4 | https://github.com/cviaai/TMJ_tracking/tree/464ca21dbeb538dc9504bd5d0e5c4d92591e69c4 | import torch
from torch import nn as nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules that together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
        order (string): order of the layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super().__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class DoubleConv(nn.Sequential):
"""
A module consisting of two consecutive convolution layers (e.g. BatchNorm3d+ReLU+Conv3d).
We use (Conv3d+ReLU+GroupNorm3d) by default.
This can be changed however by providing the 'order' argument, e.g. in order
to change to Conv3d+BatchNorm3d+ELU use order='cbe'.
Use padded convolutions to make sure that the output (H_out, W_out) is the same
as (H_in, W_in), so that you don't have to crop in the decoder path.
Args:
in_channels (int): number of input channels
        out_channels (int): number of output channels
# ... truncated (>4000 chars) for memory efficiency |
MultiHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pr/cpr46kebggqhplwxfjepx3opy5gwceg23zjukixehtkuuugc5juo.py
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
# Source node to ATen node mapping:
# score => bmm
# Graph fragment:
# %bmm : [num_users=2] = call_function[target=torch.ops.aten.bmm.default](args = (%view_22, %view_25), kwargs = {})
# %permute_16 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%view_22, [0, 2, 1]), kwargs = {})
triton_poi_fused_bmm_transpose_1 = async_compile.triton('triton_poi_fused_bmm_transpose_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_transpose_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2) + (64*((x1 + (4*(x2 % 4))) // 16))), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
tl.store(out_ptr1 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/s3/cs35hxhkkmqsjvinhszu6rfwn3hooazjypcbwiv3xr2ywjn3ewei.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out_6], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
# Source node to ATen node mapping:
# max_1 => max_1
# out_6 => exp
# score_1 => div
# src_max => clamp_min
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %clamp_min), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused_clamp_div_exp_max_sub_2 = async_compile.triton('triton_poi_fused_clamp_div_exp_max_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_exp_max_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/rq/crqro5go55q5jp5dqpl4be2mqfi5nsh7elfb6swmltvboawz2isq.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add_3
# exp_1 => exp_1
# max_1 => max_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tc/ctcdm2hcp45nesppfpnqkkcad3pvaqhoo4vgpxv6xzpnd4nc6svo.py
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_7], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
# Source node to ATen node mapping:
# add => add_3
# exp_1 => exp_1
# max_1 => max_1
# out_7 => div_1
# score_1 => div
# src_max => clamp_min
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_26, 2.0), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%div, -1, True), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%getitem, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %exp_1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %add_3), kwargs = {})
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4 = async_compile.triton('triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
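# Hedged reading of the fused kernels above (interpretation, not Inductor
# output): the 0.5 multiplier is 1 / sqrt(d_k) for d_k = 4, i.e. the usual
# scaled-dot-product scaling; the clamp against 0.0 implements
# src_max = clamp(max(score), min=0); and the extra exp(0 - src_max) added to
# the row sum is the implicit zero logit of restricted_softmax with margin = 0.
# The kernel directly above then normalizes each row by that augmented sum.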
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1)
del primals_6
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2)
del primals_8
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
triton_poi_fused_add_0.run(buf4, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
triton_poi_fused_bmm_transpose_1.run(buf3, buf5, buf15, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0); del buf3 # reuse
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm, aten.transpose]
triton_poi_fused_bmm_transpose_1.run(buf4, buf6, buf16, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, buf6, out=buf7)
buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sub, out_6], Original ATen: [aten.div, aten.max, aten.clamp, aten.sub, aten.exp]
triton_poi_fused_clamp_div_exp_max_sub_2.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3.run(buf8, buf7, buf9, 64, grid=grid(64), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [score_1, max_1, src_max, sum_1, sub_1, exp_1, add, out_7], Original ATen: [aten.div, aten.max, aten.clamp, aten.sum, aten.rsub, aten.exp, aten.add]
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4.run(buf10, buf9, 256, grid=grid(256), stream=stream0)
del buf9
buf11 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.add]
triton_poi_fused_add_0.run(buf11, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
buf12 = buf5; del buf5 # reuse
buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.bmm, aten.transpose]
triton_poi_fused_bmm_transpose_1.run(buf11, buf12, buf14, 256, grid=grid(256), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1), 0), buf12, out=buf13)
del buf12
return (reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import Tensor
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
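# Hedged sanity check, not part of the original repo: with margin=0,
# restricted_softmax(src) equals an ordinary softmax over src with one extra
# implicit zero logit appended, keeping only the original entries. The helper
# name below is hypothetical and purely illustrative.
def _demo_restricted_softmax():
    src = torch.randn(2, 5)
    padded = torch.cat([src, torch.zeros(2, 1)], dim=-1)
    reference = torch.softmax(padded, dim=-1)[:, :-1]
    assert torch.allclose(restricted_softmax(src), reference, atol=1e-06)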
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
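# Hedged illustration, not part of the original repo: with groups > 1 the
# weight acts as a block-diagonal factor of shape (groups, in/groups,
# out/groups), so each input-channel group only feeds its own output-channel
# group. The helper name and sizes below are hypothetical.
def _demo_grouped_linear():
    lin = Linear(in_channels=6, out_channels=6, groups=3)
    out = lin(torch.rand(2, 7, 6))
    assert out.shape == (2, 7, 6)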
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
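# Hedged illustration, not part of the original repo: Attention only fixes the
# shape contract (query @ key^T over the last two dims, then a weighted sum of
# value), so query and key/value may use different sequence lengths.
def _demo_attention_shapes():
    att = Attention()
    q, k, v = torch.rand(2, 3, 8), torch.rand(2, 5, 8), torch.rand(2, 5, 8)
    assert att(q, k, v).shape == (2, 3, 8)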
class MultiHead(Attention):
def __init__(self, in_channels, out_channels, heads=1, groups=1,
dropout=0, bias=True):
super(MultiHead, self).__init__(dropout)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.groups = groups
self.bias = bias
assert in_channels % heads == 0 and out_channels % heads == 0
assert in_channels % groups == 0 and out_channels % groups == 0
assert max(groups, self.heads) % min(groups, self.heads) == 0
self.lin_q = Linear(in_channels, out_channels, groups, bias)
self.lin_k = Linear(in_channels, out_channels, groups, bias)
self.lin_v = Linear(in_channels, out_channels, groups, bias)
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.lin_k.reset_parameters()
self.lin_v.reset_parameters()
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1) == value.size(-1)
assert key.size(-2) == value.size(-2)
query = self.lin_q(query)
key = self.lin_k(key)
value = self.lin_v(value)
size = list(query.size())[:-2]
out_channels_per_head = self.out_channels // self.heads
query_size = size + [query.size(-2), self.heads, out_channels_per_head]
query = query.view(*query_size).transpose(-2, -3)
key_size = size + [key.size(-2), self.heads, out_channels_per_head]
key = key.view(*key_size).transpose(-2, -3)
value_size = size + [value.size(-2), self.heads, out_channels_per_head]
value = value.view(*value_size).transpose(-2, -3)
out = super(MultiHead, self).forward(query, key, value)
out = out.transpose(-3, -2).contiguous()
out = out.view(*(size + [query.size(-2), self.out_channels]))
return out
def __repr__(self):
return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format(
self.__class__.__name__, self.in_channels, self.out_channels,
self.heads, self.groups, self.dropout, self.bias)
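# Hedged shape trace, not part of the original repo: channels are split into
# `heads` groups before attention and merged back afterwards, so the output
# keeps the input's channel count. The helper name and sizes are hypothetical.
def _demo_multihead_shapes():
    mh = MultiHead(in_channels=4, out_channels=4, heads=2)
    q = k = v = torch.rand(3, 5, 4)
    assert mh(q, k, v).shape == (3, 5, 4)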
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import Tensor
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_bmm_transpose_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2 + 64 * ((x1 + 4 * (x2 %
4)) // 16)), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
tl.store(out_ptr1 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clamp_div_exp_max_sub_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = 0.0
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp2 - tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp11 = tmp10 * tmp8
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp13 * tmp8
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = tmp16 * tmp8
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp6 + tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4(in_out_ptr0, in_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), out=buf0)
del primals_4
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (4, 1), 0), out=buf1)
del primals_6
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), out=buf2)
del primals_8
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_0[grid(256)](buf4, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(256)](buf3, buf5, buf15, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 1, 4), 0)
del buf3
buf16 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(256)](buf4, buf6, buf16, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(buf5, buf6, out=buf7)
buf8 = reinterpret_tensor(buf6, (4, 4, 1, 4, 4), (64, 16, 256, 4, 1), 0
)
del buf6
triton_poi_fused_clamp_div_exp_max_sub_2[grid(256)](buf7, buf8, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 1, 4, 1), (16, 4, 64, 1, 64),
torch.float32)
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_3[grid(64)](buf8,
buf7, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 1, 4, 4), (64, 16, 16, 4, 1), 0
)
del buf8
triton_poi_fused_add_clamp_div_exp_max_rsub_sum_4[grid(256)](buf10,
buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf9
buf11 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_0[grid(256)](buf11, primals_9, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf12 = buf5
del buf5
buf14 = empty_strided_cuda((16, 4, 4), (16, 1, 4), torch.float32)
triton_poi_fused_bmm_transpose_1[grid(256)](buf11, buf12, buf14,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0)
del buf11
extern_kernels.bmm(reinterpret_tensor(buf10, (16, 4, 4), (16, 4, 1),
0), buf12, out=buf13)
del buf12
return reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf7, buf10, buf14, buf15, buf16, reinterpret_tensor(primals_3,
(4, 64), (1, 4), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0)
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super(Linear, self).__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super(Attention, self).__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
class MultiHeadNew(Attention):
def __init__(self, in_channels, out_channels, heads=1, groups=1,
dropout=0, bias=True):
super(MultiHeadNew, self).__init__(dropout)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.groups = groups
self.bias = bias
assert in_channels % heads == 0 and out_channels % heads == 0
assert in_channels % groups == 0 and out_channels % groups == 0
assert max(groups, self.heads) % min(groups, self.heads) == 0
self.lin_q = Linear(in_channels, out_channels, groups, bias)
self.lin_k = Linear(in_channels, out_channels, groups, bias)
self.lin_v = Linear(in_channels, out_channels, groups, bias)
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.lin_k.reset_parameters()
self.lin_v.reset_parameters()
def __repr__(self):
return '{}({}, {}, heads={}, groups={}, dropout={}, bias={})'.format(
self.__class__.__name__, self.in_channels, self.out_channels,
self.heads, self.groups, self.dropout, self.bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.lin_q.weight
primals_5 = self.lin_q.bias
primals_6 = self.lin_k.weight
primals_7 = self.lin_k.bias
primals_8 = self.lin_v.weight
primals_9 = self.lin_v.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| cshjin/pytorch_geometric | MultiHead | false | 1,768 | [
"MIT"
] | 0 | 8dd0e76beb72135949a275edd851f80f7b97648f | https://github.com/cshjin/pytorch_geometric/tree/8dd0e76beb72135949a275edd851f80f7b97648f | import math
import torch
from torch import Tensor
from torch.nn import Linear
import torch.nn.functional as F
from torch.nn import Parameter
import torch.utils.data
def uniform(size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def kaiming_uniform(tensor, fan, a):
if tensor is not None:
bound = math.sqrt(6 / ((1 + a ** 2) * fan))
tensor.data.uniform_(-bound, bound)
def restricted_softmax(src, dim=-1, margin=0):
src_max = torch.clamp(src.max(dim=dim, keepdim=True)[0], min=0)
out = (src - src_max).exp()
out = out / (out.sum(dim=dim, keepdim=True) + (margin - src_max).exp())
return out
class Linear(torch.nn.Module):
def __init__(self, in_channels, out_channels, groups=1, bias=True):
super().__init__()
assert in_channels % groups == 0 and out_channels % groups == 0
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.weight = Parameter(Tensor(groups, in_channels // groups,
out_channels // groups))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
kaiming_uniform(self.weight, fan=self.weight.size(1), a=math.sqrt(5))
uniform(self.weight.size(1), self.bias)
def forward(self, src):
if self.groups > 1:
size = list(src.size())[:-1]
src = src.view(-1, self.groups, self.in_channels // self.groups)
src = src.transpose(0, 1).contiguous()
out = torch.matmul(src, self.weight)
out = out.transpose(1, 0).contiguous()
out = out.view(*(size + [self.out_channels]))
else:
out = torch.matmul(src, self.weight.squeeze(0))
if self.bias is not None:
out += self.bias
return out
def __repr__(self):
return '{}({}, {}, groups={}, bias={})'.format(self.__class__.
__name__, self.in_channels, self.out_channels, self.groups,
self.bias is not None)
class Attention(torch.nn.Module):
def __init__(self, dropout=0):
super().__init__()
self.dropout = dropout
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1)
assert key.size(-2) == value.size(-2)
score = torch.matmul(query, key.transpose(-2, -1))
score = score / math.sqrt(key.size(-1))
score = restricted_softmax(score, dim=-1)
score = F.dropout(score, p=self.dropout, training=self.training)
return torch.matmul(score, value)
def __repr__(self):
return '{}(dropout={})'.format(self.__class__.__name__, self.dropout)
class Model(Attention):
def __init__(self, in_channels, out_channels, heads=1, groups=1,
dropout=0, bias=True):
super().__init__(dropout)
self.in_channels = in_channels
self.out_channels = out_channels
self.heads = heads
self.groups = groups
self.bias = bias
assert in_channels % heads == 0 and out_channels % heads == 0
assert in_channels % groups == 0 and out_channels % groups == 0
assert max(groups, self.heads) % min(groups, self.heads) == 0
self.lin_q = Linear(in_channels, out_channels, groups, bias)
self.lin_k = Linear(in_channels, out_channels, groups, bias)
self.lin_v = Linear(in_channels, out_channels, groups, bias)
self.reset_parameters()
def reset_parameters(self):
self.lin_q.reset_parameters()
self.lin_k.reset_parameters()
self.lin_v.reset_parameters()
def forward(self, query, key, value):
assert query.dim() == key.dim() == value.dim() >= 2
assert query.size(-1) == key.size(-1) == value.size(-1)
asse
# ... truncated (>4000 chars) for memory efficiency |
MinibatchStdDev | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/64/c64ilbvzmdnnrmalq32ejoehyuog4qhx7ikcg5kbwgjcv6pvdhit.py
# Topologically Sorted Source Nodes: [mean, y, pow_1, mean_1, add, y_1, mean_2, y_3], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.repeat]
# Source node to ATen node mapping:
# add => add
# mean => mean
# mean_1 => mean_1
# mean_2 => mean_2
# pow_1 => pow_1
# y => sub
# y_1 => sqrt
# y_3 => repeat
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2.0), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-08), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view, [4, 1, 4, 4]), kwargs = {})
triton_per_fused_add_mean_pow_repeat_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_repeat_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_repeat_sqrt_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-08
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = 64.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp28, None)
''', device_str='cuda')
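# Hedged paraphrase of the graph fragment above (interpretation, not Inductor
# output): for x of shape (4, 4, 4, 4) the kernel reduces to one scalar
#     s = mean_{c,h,w} sqrt( mean_n (x - mean_n x)^2 + 1e-08 )
# (a biased batch std averaged over every feature location) and broadcasts it
# into the (4, 1, 4, 4) slice at element offset 64 of the padded (4, 5, 4, 4)
# output buffer; the cat kernel below writes the original x into offset 0.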
# kernel path: runs/run_shard_6/inductor_cache/yi/cyidf2yj3fms5jdxlfe7fdijzfj6p5a5q2qxo4llkuxnpqh6fj5o.py
# Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# y_4 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %repeat], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias
# Topologically Sorted Source Nodes: [mean, y, pow_1, mean_1, add, y_1, mean_2, y_3], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.sqrt, aten.repeat]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_repeat_sqrt_sub_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0)
buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [y_4], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MinibatchStdDev(nn.Module):
"""
    Minibatch standard deviation layer for the discriminator; it tries to prevent mode collapse
From https://github.com/akanimax/BMSG-GAN/
"""
def __init__(self):
"""
derived class constructor
"""
super().__init__()
def forward(self, x, alpha=1e-08):
"""
forward pass of the layer
:param x: input activation volume
:param alpha: small number for numerical stability
:return: y => x appended with standard deviation constant map
"""
batch_size, _, height, width = x.shape
y = x - x.mean(dim=0, keepdim=True)
y = torch.sqrt(y.pow(2.0).mean(dim=0, keepdim=False) + alpha)
y = y.mean().view(1, 1, 1, 1)
y = y.repeat(batch_size, 1, height, width)
y = torch.cat([x, y], 1)
return y
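# Hedged illustration, not part of the original repo: the layer appends exactly
# one extra channel, and that channel carries the same scalar at every position
# of every sample. The helper name below is hypothetical.
def _demo_minibatch_stddev():
    x = torch.rand(4, 4, 4, 4)
    y = MinibatchStdDev()(x)
    assert y.shape == (4, 5, 4, 4)
    assert y[:, -1].unique().numel() == 1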
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_pow_repeat_sqrt_sub_0(in_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-08
tmp22 = tmp20 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = 64.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
tmp28, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
get_raw_stream(0)
triton_per_fused_add_mean_pow_repeat_sqrt_sub_0[grid(1)](arg0_1,
buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf3,
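# Hedged, CUDA-only check added for illustration (not part of the original
# benchmark; the tolerance is an assumption): the compiled path is expected to
# reproduce the eager computation it was traced from.
def _check_against_eager():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    std = torch.sqrt((x - x.mean(0, keepdim=True)).pow(2.0).mean(0) + 1e-08)
    eager = torch.cat([x, std.mean().view(1, 1, 1, 1).repeat(4, 1, 4, 4)], 1)
    compiled, = call([x])
    assert torch.allclose(eager, compiled, atol=1e-06)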
class MinibatchStdDevNew(nn.Module):
"""
    Minibatch standard deviation layer for the discriminator; it tries to prevent mode collapse
From https://github.com/akanimax/BMSG-GAN/
"""
def __init__(self):
"""
derived class constructor
"""
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| cwza/deep_t2i | MinibatchStdDev | false | 1,769 | [
"Apache-2.0"
] | 0 | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | https://github.com/cwza/deep_t2i/tree/22877fdd28ad407984ddc3bc4d57109c54c22fc0 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Minibatch standard deviation layer for the discriminator; it tries to prevent mode collapse
From https://github.com/akanimax/BMSG-GAN/
"""
def __init__(self):
"""
derived class constructor
"""
super().__init__()
def forward(self, x, alpha=1e-08):
"""
forward pass of the layer
:param x: input activation volume
:param alpha: small number for numerical stability
:return: y => x appended with standard deviation constant map
"""
batch_size, _, height, width = x.shape
y = x - x.mean(dim=0, keepdim=True)
y = torch.sqrt(y.pow(2.0).mean(dim=0, keepdim=False) + alpha)
y = y.mean().view(1, 1, 1, 1)
y = y.repeat(batch_size, 1, height, width)
y = torch.cat([x, y], 1)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Linker | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
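# Hedged note on the constant above: 0.0625 is simply 1 / (4 * 4). With a 4x4
# window and stride 1 on a 4x4 input there is exactly one window per (n, c)
# plane, so each output element is the mean of all 16 spatial values.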
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Linker(nn.Module):
def __init__(self, inplanes, outplanes, kernel_size, strides):
super(Linker, self).__init__()
self.avgpool = nn.AvgPool2d(kernel_size, strides)
self.pad = (0, 0, 0, 0) + ((outplanes - inplanes) // 2,) * 2
def forward(self, x):
x = self.avgpool(x)
x = F.pad(x, self.pad, 'constant', 0)
return x
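# Hedged illustration, not part of the original repo: the pad tuple targets the
# channel dimension, so a hypothetical Linker(4, 8, 4, 1) pools 4x4 -> 1x1 and
# then adds (8 - 4) // 2 = 2 zero channels on each side of the result.
def _demo_linker():
    link = Linker(inplanes=4, outplanes=8, kernel_size=4, strides=1)
    out = link(torch.rand(2, 4, 4, 4))
    assert out.shape == (2, 8, 1, 1)
    assert out[:, :2].abs().sum().item() == 0
    assert out[:, -2:].abs().sum().item() == 0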
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'outplanes': 4, 'kernel_size': 4, 'strides': 1}
]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class LinkerNew(nn.Module):
def __init__(self, inplanes, outplanes, kernel_size, strides):
super(LinkerNew, self).__init__()
self.avgpool = nn.AvgPool2d(kernel_size, strides)
self.pad = (0, 0, 0, 0) + ((outplanes - inplanes) // 2,) * 2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| daniel03c1/audio_augment | Linker | false | 1,770 | [
"MIT"
] | 0 | ee73bb0844e22c57c9cbeb129560da4a3853f77d | https://github.com/daniel03c1/audio_augment/tree/ee73bb0844e22c57c9cbeb129560da4a3853f77d | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, inplanes, outplanes, kernel_size, strides):
super().__init__()
self.avgpool = nn.AvgPool2d(kernel_size, strides)
self.pad = (0, 0, 0, 0) + ((outplanes - inplanes) // 2,) * 2
def forward(self, x):
x = self.avgpool(x)
x = F.pad(x, self.pad, 'constant', 0)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'outplanes': 4, 'kernel_size': 4, 'strides': 1}
]
|
Swish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2u/c2uzxklzsbiemtrcl7via4yz7hqy7l5noylwtycrzffjpd453ape.py
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
import torch.nn as nn
class Swish(nn.Module):
"""Applies the element-wise function:
.. math::
        \\text{Swish}(x) = x * \\text{Sigmoid}(\\alpha * x) \\text{ for a constant value } \\alpha.
Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = Act['swish']()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return input * torch.sigmoid(self.alpha * input)
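# Hedged illustration, not part of the original repo: with the default
# alpha=1.0 this coincides with PyTorch's built-in SiLU, which gives a quick
# correctness check. The helper name below is hypothetical.
def _demo_swish_matches_silu():
    import torch.nn.functional as F
    x = torch.randn(8)
    assert torch.allclose(Swish()(x), F.silu(x), atol=1e-06)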
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SwishNew(nn.Module):
"""Applies the element-wise function:
.. math::
\\text{Swish}(x) = x * \\text{Sigmoid}(\\alpha * x) for constant value alpha.
Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = Act['swish']()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
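# Hedged usage sketch (added; assumes a CUDA device -- the compiled `call` above is
# specialized to a contiguous float32 tensor of shape (4, 4, 4, 4)): the fused Triton
# kernel should agree with the eager definition x * sigmoid(x).
def _compare_swish_paths():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = x * torch.sigmoid(x)
    fused = SwishNew()(x)
    assert torch.allclose(eager, fused, atol=1e-6)
    return fused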
| danielschulz/MONAI | Swish | false | 1,772 | [
"Apache-2.0"
] | 0 | 54ef6e9e700f0de3d50184c0148f953be871a58e | https://github.com/danielschulz/MONAI/tree/54ef6e9e700f0de3d50184c0148f953be871a58e | import torch
import torch.nn
import torch.nn as nn
class Model(nn.Module):
"""Applies the element-wise function:
.. math::
\\text{Swish}(x) = x * \\text{Sigmoid}(\\alpha * x) for constant value alpha.
Citation: Searching for Activation Functions, Ramachandran et al., 2017, https://arxiv.org/abs/1710.05941.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
Examples::
>>> m = Act['swish']()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, alpha=1.0):
super().__init__()
self.alpha = alpha
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return input * torch.sigmoid(self.alpha * input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
DistanceMatrixLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/62/c6246vv4dds63l2ahv4bilcs2siu7ck2y2w6mqujdtdgef7p5b2x.py
# Topologically Sorted Source Nodes: [l1_diff, sq_lengths], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# l1_diff => sum_2
# sq_lengths => sum_1
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + (64*x0)), xmask, other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp9, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qz/cqz2vfmobz2hrhya7c3vow4lxpxacz2mch7fn6l2jgloq5sbmnrq.py
# Topologically Sorted Source Nodes: [truediv, mean], Original ATen: [aten.div, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %sum_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_div_mean_1 = async_compile.triton('triton_per_fused_div_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = 4.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [l1_diff, sq_lengths], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, arg1_1, arg2_1, buf0, buf1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [truediv, mean], Original ATen: [aten.div, aten.mean]
triton_per_fused_div_mean_1.run(buf3, buf0, buf1, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DistanceMatrixLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, scores, labels, mask):
sq_lengths = mask.view(mask.size(0), -1).sum(1)
l1_diff = (mask * torch.abs(scores - labels)).view(labels.size(0), -1
).sum(1)
return torch.mean(l1_diff / sq_lengths)
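# Hedged sanity check (added; shapes are illustrative): with an all-ones mask each
# per-sample sum is divided by the full element count, so the loss reduces to plain
# mean absolute error.
def _distance_matrix_loss_check():
    scores = torch.rand(2, 3, 3)
    labels = torch.rand(2, 3, 3)
    mask = torch.ones_like(scores)
    loss = DistanceMatrixLoss()(scores, labels, mask)
    assert torch.allclose(loss, (scores - labels).abs().mean())
    return loss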
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
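    # Added descriptive comment: one program per sample (xnumel = 4); each program
    # reduces over the 64 flattened elements, writing
    # out_ptr0[x] = sum_r in_ptr0 * |in_ptr1 - in_ptr2| and out_ptr1[x] = sum_r in_ptr0.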
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0)
tmp3 = tmp1 - tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = tmp0 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + x0, tmp9, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
@triton.jit
def triton_per_fused_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
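    # Added descriptive comment: divide the two per-sample sums elementwise, then
    # average the 4 quotients into a single scalar written to in_out_ptr0.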
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 / tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = 4.0
tmp7 = tmp5 / tmp6
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(4)](arg0_1, arg1_1, arg2_1, buf0, buf1,
4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_div_mean_1[grid(1)](buf3, buf0, buf1, 1, 4, XBLOCK
=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class DistanceMatrixLossNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
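# Hedged usage sketch (added; assumes a CUDA device -- `call` is specialized to
# contiguous (4, 4, 4, 4) float32 inputs, per the assert_size_stride guards above).
def _run_compiled_distance_loss():
    scores = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.rand(4, 4, 4, 4, device='cuda')
    mask = torch.rand(4, 4, 4, 4, device='cuda')
    return DistanceMatrixLossNew()(scores, labels, mask)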
| daemon/vizbert | DistanceMatrixLoss | false | 1,773 | [
"MIT"
] | 0 | e40b7d1529f8857050313f8d87ff03b1b7226c9e | https://github.com/daemon/vizbert/tree/e40b7d1529f8857050313f8d87ff03b1b7226c9e | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, scores, labels, mask):
sq_lengths = mask.view(mask.size(0), -1).sum(1)
l1_diff = (mask * torch.abs(scores - labels)).view(labels.size(0), -1
).sum(1)
return torch.mean(l1_diff / sq_lengths)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
ExtResNetBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/cn/ccnjw5i2bmojhqd43gtvy2mo5rvcvx4kyqocflzm32zw36biefd7.py
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu]
# Source node to ATen node mapping:
# input_2 => add, add_1, mul_1, rsqrt, var_mean
# input_3 => expm1, gt, mul_2, mul_4, where
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_6), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_2, %mul_4), kwargs = {})
triton_per_fused_elu_native_group_norm_0 = async_compile.triton('triton_per_fused_elu_native_group_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_elu_native_group_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 1.0
tmp31 = tmp27 * tmp30
tmp32 = libdevice.expm1(tmp31)
tmp33 = tmp32 * tmp30
tmp34 = tl.where(tmp29, tmp31, tmp33)
tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp34, xmask)
tl.store(out_ptr2 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ql/cql5y77ikb3qyybctpvmmqkg56j3yahci6d3fpavhe5gjjt6labu.py
# Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu]
# Source node to ATen node mapping:
# input_8 => add_4, add_5, mul_11, rsqrt_2, var_mean_2
# out => add_6
# out_1 => expm1_2, gt_2, mul_12, mul_14, where_2
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_22), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_11, %unsqueeze_19), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %where), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {})
# %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 1.0), kwargs = {})
# %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_12, %mul_14), kwargs = {})
triton_per_fused_add_elu_native_group_norm_1 = async_compile.triton('triton_per_fused_add_elu_native_group_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_elu_native_group_norm_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp32 = 1.0
tmp33 = tmp29 * tmp32
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp32
tmp36 = tl.where(tmp31, tmp33, tmp35)
tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp36, xmask)
tl.store(out_ptr2 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf4; del buf4 # reuse
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.elu]
stream0 = get_raw_stream(0)
triton_per_fused_elu_native_group_norm_0.run(buf6, buf0, primals_3, primals_4, buf1, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf11; del buf11 # reuse
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_5, input_6], Original ATen: [aten.native_group_norm, aten.elu]
triton_per_fused_elu_native_group_norm_0.run(buf13, buf7, primals_6, primals_7, buf8, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False, output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = buf19; del buf19 # reuse
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [input_8, out, out_1], Original ATen: [aten.native_group_norm, aten.add, aten.elu]
triton_per_fused_add_elu_native_group_norm_1.run(buf20, buf14, primals_9, primals_10, buf6, buf15, buf18, 16, 16, grid=grid(16), stream=stream0)
del primals_10
return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8, primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf7, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(buf12, (4, 4), (4, 1), 0), buf13, buf14, reinterpret_tensor(buf15, (4, 4), (4, 1), 0), reinterpret_tensor(buf18, (4, 4), (4, 1), 0), buf20, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 3, 3), (108, 27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn as nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
        order (string): order of operations in the layer, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class ExtResNetBlock(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'cge', num_groups=8, **kwargs):
super(ExtResNetBlock, self).__init__()
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=n_order, num_groups=num_groups)
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
residual = out
out = self.conv2(out)
out = self.conv3(out)
out += residual
out = self.non_linearity(out)
return out
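# Hedged usage sketch (added; shapes and channel counts are illustrative): with
# kernel_size=3 and padding=1 the block preserves spatial size while mapping
# in_channels -> out_channels on (N, C, D, H, W) input.
def _ext_resnet_block_example():
    block = ExtResNetBlock(in_channels=4, out_channels=8, num_groups=4)
    x = torch.rand(2, 4, 8, 8, 8)
    y = block(x)
    assert y.shape == (2, 8, 8, 8, 8)
    return y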
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_elu_native_group_norm_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
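    # Added descriptive comment: fused GroupNorm + ELU. Each program normalizes one
    # (sample, group) slice of 16 values, applies the affine scale/shift from
    # in_ptr1/in_ptr2, applies ELU, and writes the result in place along with the
    # per-group mean (out_ptr0) and rsqrt of variance (out_ptr2).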
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 1.0
tmp31 = tmp27 * tmp30
tmp32 = libdevice.expm1(tmp31)
tmp33 = tmp32 * tmp30
tmp34 = tl.where(tmp29, tmp31, tmp33)
tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp34, xmask)
tl.store(out_ptr2 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_elu_native_group_norm_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
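    # Added descriptive comment: same fused GroupNorm + ELU as above, but the residual
    # tensor in in_ptr3 is added before the ELU.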
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = 0.0
tmp31 = tmp29 > tmp30
tmp32 = 1.0
tmp33 = tmp29 * tmp32
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp32
tmp36 = tl.where(tmp31, tmp33, tmp35)
tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp36, xmask)
tl.store(out_ptr2 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3, 3), (108, 27, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_2, (1,
4, 4, 4, 4), (256, 64, 16, 4, 1), 0), primals_1, stride=(1, 1,
1), padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = buf4
del buf4
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_elu_native_group_norm_0[grid(16)](buf6, buf0,
primals_3, primals_4, buf1, buf5, 16, 16, XBLOCK=8, num_warps=2,
num_stages=1)
del primals_4
buf7 = extern_kernels.convolution(reinterpret_tensor(buf6, (1, 4, 4,
4, 4), (256, 64, 16, 4, 1), 0), primals_5, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf7, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = buf11
del buf11
buf12 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_elu_native_group_norm_0[grid(16)](buf13, buf7,
primals_6, primals_7, buf8, buf12, 16, 16, XBLOCK=8, num_warps=
2, num_stages=1)
del primals_7
buf14 = extern_kernels.convolution(reinterpret_tensor(buf13, (1, 4,
4, 4, 4), (256, 64, 16, 4, 1), 0), primals_8, stride=(1, 1, 1),
padding=(1, 1, 1), dilation=(1, 1, 1), transposed=False,
output_padding=(0, 0, 0), groups=1, bias=None)
assert_size_stride(buf14, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
buf15 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf19 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf20 = buf19
del buf19
buf18 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_add_elu_native_group_norm_1[grid(16)](buf20, buf14,
primals_9, primals_10, buf6, buf15, buf18, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del primals_10
return (buf20, primals_1, primals_3, primals_5, primals_6, primals_8,
primals_9, reinterpret_tensor(primals_2, (1, 4, 4, 4, 4), (256, 64,
16, 4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(buf5, (4, 4), (4, 1), 0), buf6, buf7,
reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(
buf12, (4, 4), (4, 1), 0), buf13, buf14, reinterpret_tensor(buf15,
(4, 4), (4, 1), 0), reinterpret_tensor(buf18, (4, 4), (4, 1), 0), buf20
)
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
        order (string): order of operations in the layer, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super(SingleConv, self).__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class ExtResNetBlockNew(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'cge', num_groups=8, **kwargs):
super(ExtResNetBlockNew, self).__init__()
self.conv1 = SingleConv(in_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
self.conv2 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=order, num_groups=num_groups)
n_order = order
for c in 'rel':
n_order = n_order.replace(c, '')
self.conv3 = SingleConv(out_channels, out_channels, kernel_size=
kernel_size, order=n_order, num_groups=num_groups)
if 'l' in order:
self.non_linearity = nn.LeakyReLU(negative_slope=0.1, inplace=True)
elif 'e' in order:
self.non_linearity = nn.ELU(inplace=True)
else:
self.non_linearity = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.conv.weight
primals_3 = self.conv1.groupnorm.weight
primals_4 = self.conv1.groupnorm.bias
primals_5 = self.conv2.conv.weight
primals_6 = self.conv2.groupnorm.weight
primals_7 = self.conv2.groupnorm.bias
primals_8 = self.conv3.conv.weight
primals_9 = self.conv3.groupnorm.weight
primals_10 = self.conv3.groupnorm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
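# Hedged usage sketch (added; assumes a CUDA device -- the compiled `call` is traced for
# the (4, 4, 4, 4) float32 benchmark input and 4-channel weights, so other shapes are
# not expected to pass the assert_size_stride guards).
def _run_compiled_ext_resnet_block():
    block = ExtResNetBlockNew(in_channels=4, out_channels=4, num_groups=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return block(x)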
| cviaai/TMJ_tracking | ExtResNetBlock | false | 1,774 | [
"MIT"
] | 0 | 464ca21dbeb538dc9504bd5d0e5c4d92591e69c4 | https://github.com/cviaai/TMJ_tracking/tree/464ca21dbeb538dc9504bd5d0e5c4d92591e69c4 | import torch
from torch import nn as nn
def conv3d(in_channels, out_channels, kernel_size, bias, padding=1):
return nn.Conv3d(in_channels, out_channels, kernel_size, padding=
padding, bias=bias)
def create_conv(in_channels, out_channels, kernel_size, order, num_groups,
padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
        order (string): order of operations in the layer, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, 'Conv layer MUST be present'
assert order[0
] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1,
inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels,
kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups,
num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(
f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']"
)
return modules
class SingleConv(nn.Sequential):
"""
Basic convolutional module consisting of a Conv3d, non-linearity and optional batchnorm/groupnorm. The order
of operations can be specified via the `order` parameter
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
kernel_size (int): size of the convolving kernel
order (string): determines the order of layers, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
"""
def __init__(self, in_channels, out_channels, kernel_size=3, order=
'crg', num_groups=8, padding=1):
super().__init__()
for name, module in create_conv(in_channels, out_channels,
kernel_size, order, num_groups, padding=padding):
self.add_module(name, module)
class Model(nn.Module):
"""
Basic UNet block consisting of a SingleConv followed by the residual block.
The SingleConv takes care of increasing/decreasing the number of channels and also ensures that the number
of output channels is compatible with the residual block that follows.
This block can be used instead of standard DoubleConv in the Encoder module.
Motivated by: https://arxiv.org/pdf/1706.00120.pdf
Notice we use ELU instead of ReLU (order='cge') and put non-linearity after the groupnorm.
"""
def __init__(self, in_channels, o
# ... truncated (>4000 chars) for memory efficiency |
DepthwiseSeparableConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
return (buf2, primals_1, primals_2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
class DepthwiseSeparableConv2d(nn.Module):
def __init__(self, input, output, padding=0, bias=False):
super(DepthwiseSeparableConv2d, self).__init__()
self.depthwise = nn.Conv2d(input, input, kernel_size=3, padding=
padding, groups=input, bias=bias)
self.pointwise = nn.Conv2d(input, output, kernel_size=1)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
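# Hedged sketch (added; channel counts are illustrative): the depthwise + pointwise
# factorization uses far fewer parameters than a dense 3x3 convolution with the same
# in/out channels.
def _param_count_example():
    sep = DepthwiseSeparableConv2d(input=32, output=64, padding=1)
    dense = nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=False)
    n_sep = sum(p.numel() for p in sep.parameters())
    n_dense = sum(p.numel() for p in dense.parameters())
    assert n_sep < n_dense  # 2400 (288 + 2048 + 64) vs 18432 (32 * 64 * 9)
    return n_sep, n_dense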
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input': 4, 'output': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
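# The jitted kernel below is the in-place epilogue of the pointwise
# convolution: it adds bias[c] to every element of the (4, 4, 2, 2) output.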
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1))
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(64)](buf2, primals_4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_4
return buf2, primals_1, primals_2, primals_3, buf0
class DepthwiseSeparableConv2dNew(nn.Module):
def __init__(self, input, output, padding=0, bias=False):
super(DepthwiseSeparableConv2dNew, self).__init__()
self.depthwise = nn.Conv2d(input, input, kernel_size=3, padding=
padding, groups=input, bias=bias)
self.pointwise = nn.Conv2d(input, output, kernel_size=1)
def forward(self, input_0):
primals_1 = self.depthwise.weight
primals_3 = self.pointwise.weight
primals_4 = self.pointwise.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
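# Hedged equivalence sketch (added; not in the generated dump): the fused
# wrapper should match running the two convolutions eagerly on the same
# weights. Requires a CUDA device; the helper name and tolerance are
# assumptions for illustration only.
def _check_against_eager():
    m = DepthwiseSeparableConv2dNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    reference = m.pointwise(m.depthwise(x))
    assert torch.allclose(m(x), reference, atol=1e-6)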
| csharpshooter/DeepLearning | DepthwiseSeparableConv2d | false | 1,775 | [
"MIT"
] | 0 | c1d20660c32076468970f7376931e1fcd0d2644e | https://github.com/csharpshooter/DeepLearning/tree/c1d20660c32076468970f7376931e1fcd0d2644e | import torch
import torch.nn as nn
import torch.optim.lr_scheduler
import torch.utils.data
from torchvision.transforms import *
class Model(nn.Module):
def __init__(self, input, output, padding=0, bias=False):
super().__init__()
self.depthwise = nn.Conv2d(input, input, kernel_size=3, padding=
padding, groups=input, bias=bias)
self.pointwise = nn.Conv2d(input, output, kernel_size=1)
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CARAFE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/fp/cfp5jrxxyxrvhcpoq5tio3p5tkhj5ugdrpyur3x4v6meatzih7jn.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/o7/co7jjmzuhhhwmmaawi26hedwch5l23jgtoa6pphpem7yyniv3kzc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 6400
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ua/cuahiiwehityihcam6vkjahov2l6zrdfzrua2if7xoyemunzhwyg.py
# Topologically Sorted Source Nodes: [kernel_tensor], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# kernel_tensor => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/bb/cbbrhfm5bqxhz6vftqwodjddlvipu27rq7ji7u3l7zfzupa7ca6n.py
# Topologically Sorted Source Nodes: [kernel_tensor_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# kernel_tensor_3 => amax, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__softmax_3 = async_compile.triton('triton_per_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 32],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r3 = rindex
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*(x1 % 2)) + (4*r3) + (100*(x0 // 2)) + (400*(x1 // 2)) + (1600*x2) + (x0 % 2)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + ((2*(x1 % 2)) + (4*r3) + (x0 % 2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp6, xmask)
tl.store(out_ptr1 + (x4), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yl/cyl2mz5uucqrciohllfad7watdqphbicklksuxfz3bessqzuq7wv.py
# Topologically Sorted Source Nodes: [kernel_tensor_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# kernel_tensor_3 => div, exp, sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 64], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 100
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 8
x3 = (xindex // 8)
y0 = yindex % 25
y1 = (yindex // 25)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*(x3 % 2)) + (4*y0) + (100*(x2 // 2)) + (400*(x3 // 2)) + (1600*y1) + (x2 % 2)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((2*(x3 % 2)) + (4*y0) + (x2 % 2)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x4 + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (y0 + (25*x4) + (1600*y1)), tmp7, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/z4/cz4x2ogi3jnernglgttywrvgerrqoyx7y5gtvt6wxmbl7x36huwn.py
# Topologically Sorted Source Nodes: [out_tensor], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out_tensor => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y2 = (yindex // 16) % 4
y1 = (yindex // 4) % 4
y7 = yindex
tmp0 = (-2) + y2 + (x4 // 5)
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-2) + y1 + (x4 % 5)
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-40) + y7 + (4*(x4 % 5)) + (16*(x4 // 5))), tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x4 + (25*y7)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/lz/clzhpqxwzytws7sb3zeqiulkx2jcpdeozzrcrmelqffaauilize2.py
# Topologically Sorted Source Nodes: [out_tensor], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# out_tensor => clone_5
# Graph fragment:
# %clone_5 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1600
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 25
y1 = (yindex // 25) % 4
y2 = (yindex // 100)
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (25*(x3 % 2)) + (50*y1) + (200*(x3 // 2)) + (400*y2)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + (4*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mg/cmgqmpdpnkwjxi5m6dmo6ws34dsv5ypbtelfc75e5pkunf64443k.py
# Topologically Sorted Source Nodes: [out_tensor_3], Original ATen: [aten.pixel_shuffle]
# Source node to ATen node mapping:
# out_tensor_3 => clone_7
# Graph fragment:
# %clone_7 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_9,), kwargs = {memory_format: torch.channels_last})
triton_poi_fused_pixel_shuffle_7 = async_compile.triton('triton_poi_fused_pixel_shuffle_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pixel_shuffle_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 8
x3 = (xindex // 8)
y0 = yindex % 4
y1 = (yindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*(x3 % 2)) + (4*y0) + (16*(x2 // 2)) + (64*(x3 // 2)) + (256*y1) + (x2 % 2)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x4) + (256*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/nh/cnhhwgdlzrbwogu7m4qsiyz6a5m5nuantoplux4yqgxgxza5obap.py
# Topologically Sorted Source Nodes: [out_tensor_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_tensor_4 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%clone_7, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (100, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (100, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((100, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_4, buf1, 6400, 9, grid=grid(6400, 9), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [kernel_tensor], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 1, 256, 64))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [kernel_tensor], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [kernel_tensor_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 100, 4, 4), (1600, 1, 400, 100))
buf5 = empty_strided_cuda((4, 1, 8, 8), (64, 256, 8, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 8, 8), (64, 256, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [kernel_tensor_3], Original ATen: [aten._softmax]
triton_per_fused__softmax_3.run(buf4, primals_5, buf5, buf6, 256, 25, grid=grid(256), stream=stream0)
buf7 = empty_strided_cuda((4, 25, 8, 8), (1600, 1, 200, 25), torch.float32)
# Topologically Sorted Source Nodes: [kernel_tensor_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf4, primals_5, buf5, buf6, buf7, 100, 64, grid=grid(100, 64), stream=stream0)
del buf5
del buf6
del primals_5
buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4, 25), (1600, 400, 100, 25, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_tensor], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf0, buf8, 256, 25, grid=grid(256, 25), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 25, 4), (1600, 400, 100, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_tensor], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf7, buf9, 1600, 4, grid=grid(1600, 4), stream=stream0)
buf10 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_tensor], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 25), (100, 25, 1), 0), reinterpret_tensor(buf9, (64, 25, 4), (100, 4, 1), 0), out=buf10)
del buf9
buf11 = empty_strided_cuda((4, 4, 8, 8), (256, 1, 32, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_tensor_3], Original ATen: [aten.pixel_shuffle]
triton_poi_fused_pixel_shuffle_7.run(buf10, buf11, 16, 64, grid=grid(16, 64), stream=stream0)
del buf10
# Topologically Sorted Source Nodes: [out_tensor_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 8, 8), (256, 1, 32, 4))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [out_tensor_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(buf13, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
return (buf13, buf0, primals_2, buf1, primals_6, buf3, buf7, buf11, reinterpret_tensor(buf8, (64, 25, 4), (100, 1, 25), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((100, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CARAFE(nn.Module):
def __init__(self, inC, outC, Kencoder=3, delta=2, Kup=5, Cm=64):
super(CARAFE, self).__init__()
self.Kencoder = Kencoder
self.delta = delta
self.Kup = Kup
self.down = nn.Conv2d(in_channels=inC, out_channels=Cm, kernel_size=1)
self.encoder = nn.Conv2d(64, self.delta ** 2 * self.Kup ** 2, self.
Kencoder, 1, self.Kencoder // 2)
self.out = nn.Conv2d(inC, outC, 1)
def forward(self, in_tensor):
N, C, H, W = in_tensor.size()
kernel_tensor = self.down(in_tensor)
kernel_tensor = self.encoder(kernel_tensor)
kernel_tensor = F.pixel_shuffle(kernel_tensor, self.delta)
kernel_tensor = F.softmax(kernel_tensor, dim=1)
kernel_tensor = kernel_tensor.unfold(2, self.delta, step=self.delta)
kernel_tensor = kernel_tensor.unfold(3, self.delta, step=self.delta)
kernel_tensor = kernel_tensor.reshape(N, self.Kup ** 2, H, W, self.
delta ** 2)
kernel_tensor = kernel_tensor.permute(0, 2, 3, 1, 4)
in_tensor = F.pad(in_tensor, pad=(self.Kup // 2, self.Kup // 2,
self.Kup // 2, self.Kup // 2), mode='constant', value=0)
in_tensor = in_tensor.unfold(dimension=2, size=self.Kup, step=1)
in_tensor = in_tensor.unfold(3, self.Kup, step=1)
in_tensor = in_tensor.reshape(N, C, H, W, -1)
in_tensor = in_tensor.permute(0, 2, 3, 1, 4)
out_tensor = torch.matmul(in_tensor, kernel_tensor)
out_tensor = out_tensor.reshape(N, H, W, -1)
out_tensor = out_tensor.permute(0, 3, 1, 2)
out_tensor = F.pixel_shuffle(out_tensor, self.delta)
out_tensor = self.out(out_tensor)
return out_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inC': 4, 'outC': 4}]
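# Hedged shape trace (added for illustration; not in the original file), for
# the default delta=2, Kup=5, Cm=64 and the (4, 4, 4, 4) input above:
#   down            -> (4, 64, 4, 4)
#   encoder         -> (4, delta^2 * Kup^2, 4, 4) = (4, 100, 4, 4)
#   pixel_shuffle   -> (4, 25, 8, 8), softmaxed over dim 1
#   kernel reshape  -> (4, 4, 4, 25, 4)   one 5x5 kernel per upsampled pixel
#   input unfold    -> (4, 4, 4, 4, 25)   5x5 neighbourhood of each pixel
#   matmul          -> (4, 4, 4, 4, 4), then pixel_shuffle + 1x1 out conv
def _carafe_shape_example():
    m = CARAFE(4, 4)
    y = m(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 8, 8)
    return y.shape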
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 6400
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_per_fused__softmax_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * (x1 % 2) + 4 * r3 + 100 * (x0 // 2) + 400 *
(x1 // 2) + 1600 * x2 + x0 % 2), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (2 * (x1 % 2) + 4 * r3 + x0 % 2), rmask &
xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x4, tmp6, xmask)
tl.store(out_ptr1 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 100
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 8
x3 = xindex // 8
y0 = yindex % 25
y1 = yindex // 25
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * (x3 % 2) + 4 * y0 + 100 * (x2 // 2) + 400 *
(x3 // 2) + 1600 * y1 + x2 % 2), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (2 * (x3 % 2) + 4 * y0 + x2 % 2), xmask &
ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 64 * y1), xmask & ymask, eviction_policy
='evict_last')
tmp6 = tl.load(in_ptr3 + (x4 + 64 * y1), xmask & ymask, eviction_policy
='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (y0 + 25 * x4 + 1600 * y1), tmp7, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y2 = yindex // 16 % 4
y1 = yindex // 4 % 4
y7 = yindex
tmp0 = -2 + y2 + x4 // 5
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1, 1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -2 + y1 + x4 % 5
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-40 + y7 + 4 * (x4 % 5) + 16 * (x4 // 5)),
tmp10 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (x4 + 25 * y7), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1600
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex
y0 = yindex % 25
y1 = yindex // 25 % 4
y2 = yindex // 100
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 25 * (x3 % 2) + 50 * y1 + 200 * (x3 // 2
) + 400 * y2), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3 + 4 * y4), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_pixel_shuffle_7(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 8
x3 = xindex // 8
y0 = yindex % 4
y1 = yindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * (x3 % 2) + 4 * y0 + 16 * (x2 // 2) + 64 *
(x3 // 2) + 256 * y1 + x2 % 2), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x4 + 256 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (100, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_1, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((100, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_1[grid(6400, 9)](primals_4, buf1, 6400, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf2 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 1, 256, 64))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(4096)](buf3, primals_3, 4096,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, buf1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 100, 4, 4), (1600, 1, 400, 100))
buf5 = empty_strided_cuda((4, 1, 8, 8), (64, 256, 8, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 8, 8), (64, 256, 8, 1), torch.float32)
triton_per_fused__softmax_3[grid(256)](buf4, primals_5, buf5, buf6,
256, 25, XBLOCK=8, num_warps=2, num_stages=1)
buf7 = empty_strided_cuda((4, 25, 8, 8), (1600, 1, 200, 25), torch.
float32)
triton_poi_fused__softmax_4[grid(100, 64)](buf4, primals_5, buf5,
buf6, buf7, 100, 64, XBLOCK=2, YBLOCK=128, num_warps=4,
num_stages=1)
del buf5
del buf6
del primals_5
buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4, 25), (1600, 400, 100,
25, 1), 0)
del buf4
triton_poi_fused_clone_5[grid(256, 25)](buf0, buf8, 256, 25, XBLOCK
=32, YBLOCK=32, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 25, 4), (1600, 400, 100, 4, 1),
torch.float32)
triton_poi_fused_clone_6[grid(1600, 4)](buf7, buf9, 1600, 4, XBLOCK
=4, YBLOCK=256, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf8, (64, 4, 25), (100, 25,
1), 0), reinterpret_tensor(buf9, (64, 25, 4), (100, 4, 1), 0),
out=buf10)
del buf9
buf11 = empty_strided_cuda((4, 4, 8, 8), (256, 1, 32, 4), torch.float32
)
triton_poi_fused_pixel_shuffle_7[grid(16, 64)](buf10, buf11, 16, 64,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf10
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 8, 8), (256, 1, 32, 4))
buf13 = buf12
del buf12
triton_poi_fused_convolution_8[grid(1024)](buf13, primals_7, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return (buf13, buf0, primals_2, buf1, primals_6, buf3, buf7, buf11,
reinterpret_tensor(buf8, (64, 25, 4), (100, 1, 25), 0))
class CARAFENew(nn.Module):
def __init__(self, inC, outC, Kencoder=3, delta=2, Kup=5, Cm=64):
super(CARAFENew, self).__init__()
self.Kencoder = Kencoder
self.delta = delta
self.Kup = Kup
self.down = nn.Conv2d(in_channels=inC, out_channels=Cm, kernel_size=1)
self.encoder = nn.Conv2d(64, self.delta ** 2 * self.Kup ** 2, self.
Kencoder, 1, self.Kencoder // 2)
self.out = nn.Conv2d(inC, outC, 1)
def forward(self, input_0):
primals_2 = self.down.weight
primals_3 = self.down.bias
primals_4 = self.encoder.weight
primals_5 = self.encoder.bias
primals_6 = self.out.weight
primals_7 = self.out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
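# Hedged usage sketch (added; not in the generated dump): the compiled
# wrapper reuses the eager module's parameters and upsamples by delta=2,
# so a (4, 4, 4, 4) CUDA input yields a (4, 4, 8, 8) output. Requires CUDA;
# the helper name is hypothetical.
def _run_compiled_carafe():
    m = CARAFENew(4, 4).cuda()
    y = m(torch.rand(4, 4, 4, 4, device='cuda'))
    assert y.shape == (4, 4, 8, 8)
    return y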
| cs18chen/fbnn | CARAFE | false | 1,776 | [
"MIT"
] | 0 | 1f52c49f8d1e0e1fa7b5a04677351817c4c0e977 | https://github.com/cs18chen/fbnn/tree/1f52c49f8d1e0e1fa7b5a04677351817c4c0e977 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, inC, outC, Kencoder=3, delta=2, Kup=5, Cm=64):
super().__init__()
self.Kencoder = Kencoder
self.delta = delta
self.Kup = Kup
self.down = nn.Conv2d(in_channels=inC, out_channels=Cm, kernel_size=1)
self.encoder = nn.Conv2d(64, self.delta ** 2 * self.Kup ** 2, self.
Kencoder, 1, self.Kencoder // 2)
self.out = nn.Conv2d(inC, outC, 1)
def forward(self, in_tensor):
N, C, H, W = in_tensor.size()
kernel_tensor = self.down(in_tensor)
kernel_tensor = self.encoder(kernel_tensor)
kernel_tensor = F.pixel_shuffle(kernel_tensor, self.delta)
kernel_tensor = F.softmax(kernel_tensor, dim=1)
kernel_tensor = kernel_tensor.unfold(2, self.delta, step=self.delta)
kernel_tensor = kernel_tensor.unfold(3, self.delta, step=self.delta)
kernel_tensor = kernel_tensor.reshape(N, self.Kup ** 2, H, W, self.
delta ** 2)
kernel_tensor = kernel_tensor.permute(0, 2, 3, 1, 4)
in_tensor = F.pad(in_tensor, pad=(self.Kup // 2, self.Kup // 2,
self.Kup // 2, self.Kup // 2), mode='constant', value=0)
in_tensor = in_tensor.unfold(dimension=2, size=self.Kup, step=1)
in_tensor = in_tensor.unfold(3, self.Kup, step=1)
in_tensor = in_tensor.reshape(N, C, H, W, -1)
in_tensor = in_tensor.permute(0, 2, 3, 1, 4)
out_tensor = torch.matmul(in_tensor, kernel_tensor)
out_tensor = out_tensor.reshape(N, H, W, -1)
out_tensor = out_tensor.permute(0, 3, 1, 2)
out_tensor = F.pixel_shuffle(out_tensor, self.delta)
out_tensor = self.out(out_tensor)
return out_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AMBinaryLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ir/cirhwaysykynruozlnnr56hokyyqg2ixejahe2sxi5i23h6yll62.py
# Topologically Sorted Source Nodes: [mul_2, sub_3, mul_3, exp, add, log, mul_4, sub, mul_5, add_1, mul_6, exp_1, add_2, log_1, mul_7, add_, sum_1, neg, sub_2, mul_1, sigmoid_1, sub_1, mul, sigmoid], Original ATen: [aten.mul, aten.sub, aten.exp, aten.add, aten.log, aten.rsub, aten.sum, aten.neg, aten.sigmoid]
# Source node to ATen node mapping:
# add => add
# add_ => add_3
# add_1 => add_1
# add_2 => add_2
# exp => exp
# exp_1 => exp_1
# log => log
# log_1 => log_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# neg => neg
# sigmoid => sigmoid
# sigmoid_1 => sigmoid_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sum_1 => sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.02666666666666667), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 0.35), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, -30), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %log), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.006666666666666665), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, 0.35), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 30), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_6,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_1, 1), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %log_1), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_7), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add_3,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg, 0.35), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 30), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 0.35), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 30), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
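# Added note (not emitted by the compiler): the constants baked into the
# fused kernel below come from the module defaults m=0.35, k=0.8, s=30 of
# AMBinaryLoss further down in this file:
#   0.02666666666666667  ~= k / s       = 0.8 / 30
#   0.006666666666666665 ~= (1 - k) / s = 0.2 / 30
# so the reduction evaluates
#   sum( (k/s) * t * log(1 + exp(-s*(x - m)))
#      + ((1-k)/s) * (1 - t) * log(1 + exp(s*(x + m))) )
# while the two sigmoids are the xs_pos / xs_neg side outputs.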
triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {7: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=(7,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = 0.02666666666666667
tmp4 = tmp0 * tmp3
tmp6 = 0.35
tmp7 = tmp5 - tmp6
tmp8 = -30.0
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp10 + tmp1
tmp12 = tl_math.log(tmp11)
tmp13 = tmp4 * tmp12
tmp14 = 0.006666666666666665
tmp15 = tmp2 * tmp14
tmp16 = tmp5 + tmp6
tmp17 = 30.0
tmp18 = tmp16 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 + tmp1
tmp21 = tl_math.log(tmp20)
tmp22 = tmp15 * tmp21
tmp23 = tmp13 + tmp22
tmp24 = -tmp5
tmp25 = tmp24 - tmp6
tmp26 = tmp25 * tmp17
tmp27 = tl.sigmoid(tmp26)
tmp28 = tmp7 * tmp17
tmp29 = tl.sigmoid(tmp28)
tmp30 = tl.broadcast_to(tmp23, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp2, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp23, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp27, None)
tl.store(out_ptr3 + (tl.broadcast_to(r0, [RBLOCK])), tmp29, None)
tl.store(out_ptr4 + (tl.full([1], 0, tl.int32)), tmp32, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, sub_3, mul_3, exp, add, log, mul_4, sub, mul_5, add_1, mul_6, exp_1, add_2, log_1, mul_7, add_, sum_1, neg, sub_2, mul_1, sigmoid_1, sub_1, mul, sigmoid], Original ATen: [aten.mul, aten.sub, aten.exp, aten.add, aten.log, aten.rsub, aten.sum, aten.neg, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf3, buf4, buf2, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, buf1, buf3, buf4, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AMBinaryLoss(nn.Module):
def __init__(self, m=0.35, k=0.8, t=1, s=30, eps=1e-08, sym_adjustment=
False, auto_balance=False, label_smooth=0.0, gamma_neg=0, gamma_pos=0):
super().__init__()
self.sym_adjustment = sym_adjustment
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.asymmetric_focus = gamma_neg > 0 or gamma_pos > 0
self.auto_balance = auto_balance
self.eps = eps
self.label_smooth = label_smooth
self.m = m
self.t = t
self.k = k
self.s = s
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def get_last_scale(self):
return self.s
def sym_adjust(self, z):
return 2 * torch.pow((z + 1) / 2, self.t) - 1
    def forward(self, cos_theta, targets):
        """
        Parameters
        ----------
        cos_theta: dot product between normalized features and proxies
        targets: targets (multi-label binarized vector)
        """
if self.label_smooth > 0:
targets = targets * (1 - self.label_smooth)
targets[targets == 0] = self.label_smooth
self.targets = targets
self.anti_targets = 1 - targets
if self.sym_adjustment:
cos_theta = self.sym_adjust(cos_theta)
self.xs_pos = torch.sigmoid(self.s * (cos_theta - self.m))
self.xs_neg = torch.sigmoid(self.s * (-cos_theta - self.m))
if self.auto_balance:
assert not self.asymmetric_focus, 'Auto balance is not compatible with asymmetric focussing'
K = self.targets.size(1)
C = self.targets.sum(1, keepdim=True)
balance_koeff_pos = (K - C) / K
balance_koeff_neg = 1 - balance_koeff_pos
elif self.asymmetric_focus:
balance_koeff_pos = 1
balance_koeff_neg = 1
else:
assert not self.asymmetric_focus and not self.auto_balance
balance_koeff_pos = self.k / self.s
balance_koeff_neg = (1 - self.k) / self.s
self.loss = balance_koeff_pos * self.targets * torch.log(1 + torch.
exp(-self.s * (cos_theta - self.m)))
self.loss.add_(balance_koeff_neg * self.anti_targets * torch.log(1 +
torch.exp(self.s * (cos_theta + self.m))))
if self.asymmetric_focus:
self.xs_pos *= self.targets
self.xs_neg *= self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
self.loss *= self.asymmetric_w
return self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp3 = 0.02666666666666667
tmp4 = tmp0 * tmp3
tmp6 = 0.35
tmp7 = tmp5 - tmp6
tmp8 = -30.0
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp10 + tmp1
tmp12 = tl_math.log(tmp11)
tmp13 = tmp4 * tmp12
tmp14 = 0.006666666666666665
tmp15 = tmp2 * tmp14
tmp16 = tmp5 + tmp6
tmp17 = 30.0
tmp18 = tmp16 * tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 + tmp1
tmp21 = tl_math.log(tmp20)
tmp22 = tmp15 * tmp21
tmp23 = tmp13 + tmp22
tmp24 = -tmp5
tmp25 = tmp24 - tmp6
tmp26 = tmp25 * tmp17
tmp27 = tl.sigmoid(tmp26)
tmp28 = tmp7 * tmp17
tmp29 = tl.sigmoid(tmp28)
tmp30 = tl.broadcast_to(tmp23, [RBLOCK])
tmp32 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp23, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp27, None)
tl.store(out_ptr3 + tl.broadcast_to(r0, [RBLOCK]), tmp29, None)
tl.store(out_ptr4 + tl.full([1], 0, tl.int32), tmp32, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_exp_log_mul_neg_rsub_sigmoid_sub_sum_0[grid(1)](
arg0_1, arg1_1, buf0, buf1, buf3, buf4, buf2, 1, 256, num_warps
=2, num_stages=1)
del arg0_1
del arg1_1
return buf2, buf1, buf3, buf4, buf0
class AMBinaryLossNew(nn.Module):
def __init__(self, m=0.35, k=0.8, t=1, s=30, eps=1e-08, sym_adjustment=
False, auto_balance=False, label_smooth=0.0, gamma_neg=0, gamma_pos=0):
super().__init__()
self.sym_adjustment = sym_adjustment
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.asymmetric_focus = gamma_neg > 0 or gamma_pos > 0
self.auto_balance = auto_balance
self.eps = eps
self.label_smooth = label_smooth
self.m = m
self.t = t
self.k = k
self.s = s
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def get_last_scale(self):
return self.s
def sym_adjust(self, z):
return 2 * torch.pow((z + 1) / 2, self.t) - 1
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| daniil-lyakhov/deep-object-reid | AMBinaryLoss | false | 1,777 | [
"Apache-2.0"
] | 0 | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | https://github.com/daniil-lyakhov/deep-object-reid/tree/b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, m=0.35, k=0.8, t=1, s=30, eps=1e-08, sym_adjustment=
False, auto_balance=False, label_smooth=0.0, gamma_neg=0, gamma_pos=0):
super().__init__()
self.sym_adjustment = sym_adjustment
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.asymmetric_focus = gamma_neg > 0 or gamma_pos > 0
self.auto_balance = auto_balance
self.eps = eps
self.label_smooth = label_smooth
self.m = m
self.t = t
self.k = k
self.s = s
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def get_last_scale(self):
return self.s
def sym_adjust(self, z):
return 2 * torch.pow((z + 1) / 2, self.t) - 1
    def forward(self, cos_theta, targets):
        """
        Parameters
        ----------
        cos_theta: dot product between normalized features and proxies
        targets: targets (multi-label binarized vector)
        """
if self.label_smooth > 0:
targets = targets * (1 - self.label_smooth)
targets[targets == 0] = self.label_smooth
self.targets = targets
self.anti_targets = 1 - targets
if self.sym_adjustment:
cos_theta = self.sym_adjust(cos_theta)
self.xs_pos = torch.sigmoid(self.s * (cos_theta - self.m))
self.xs_neg = torch.sigmoid(self.s * (-cos_theta - self.m))
if self.auto_balance:
assert not self.asymmetric_focus, 'Auto balance is not compatible with asymmetric focussing'
K = self.targets.size(1)
C = self.targets.sum(1, keepdim=True)
balance_koeff_pos = (K - C) / K
balance_koeff_neg = 1 - balance_koeff_pos
elif self.asymmetric_focus:
balance_koeff_pos = 1
balance_koeff_neg = 1
else:
assert not self.asymmetric_focus and not self.auto_balance
balance_koeff_pos = self.k / self.s
balance_koeff_neg = (1 - self.k) / self.s
self.loss = balance_koeff_pos * self.targets * torch.log(1 + torch.
exp(-self.s * (cos_theta - self.m)))
self.loss.add_(balance_koeff_neg * self.anti_targets * torch.log(1 +
torch.exp(self.s * (cos_theta + self.m))))
if self.asymmetric_focus:
self.xs_pos *= self.targets
self.xs_neg *= self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
self.loss *= self.asymmetric_w
return self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
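A minimal usage sketch for the angular-margin binary loss in the pytorch_code field above (not part of the dataset row): the Model class name comes from that field, while the batch size and number of classes are arbitrary assumptions.

import torch

criterion = Model()                                        # defaults: m=0.35, s=30, k=0.8, no focusing
cos_theta = (torch.rand(8, 20) * 2 - 1).requires_grad_()   # cosine similarities in [-1, 1]
targets = (torch.rand(8, 20) > 0.5).float()                # multi-label binarized vector
loss = criterion(cos_theta, targets)                       # scalar: sum of per-element margin losses
loss.backward()                                            # gradients flow back to cos_theta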
MaxPoolBranch | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/xf/cxfkyenlpfqc6cllorffbbhsxkwovc2lia2dqrt6eqilrdk3gldt.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaxPoolBranch(nn.Module):
"""
InceptionV4 specific max pooling branch block.
"""
def __init__(self, kernel_size=3, stride=2, padding=0):
super(MaxPoolBranch, self).__init__()
self.pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=padding)
def forward(self, x):
x = self.pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tl.store(out_ptr0 + x0, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(16)](arg0_1, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MaxPoolBranchNew(nn.Module):
"""
InceptionV4 specific max pooling branch block.
"""
def __init__(self, kernel_size=3, stride=2, padding=0):
super(MaxPoolBranchNew, self).__init__()
self.pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=padding)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| daniil-lyakhov/deep-object-reid | MaxPoolBranch | false | 1,778 | [
"Apache-2.0"
] | 0 | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | https://github.com/daniil-lyakhov/deep-object-reid/tree/b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
InceptionV4 specific max pooling branch block.
"""
def __init__(self, kernel_size=3, stride=2, padding=0):
super().__init__()
self.pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride,
padding=padding)
def forward(self, x):
x = self.pool(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
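A quick, illustrative shape check for the max-pooling branch above, assuming the Model class from the pytorch_code field: with the default 3x3 kernel and stride 2, a 4x4 spatial input collapses to 1x1, which matches the (4, 4, 1, 1) buffer allocated in the compiled call().

import torch

branch = Model()            # wraps nn.MaxPool2d(kernel_size=3, stride=2, padding=0)
x = torch.rand(4, 4, 4, 4)
y = branch(x)
print(y.shape)              # torch.Size([4, 4, 1, 1]); floor((4 - 3) / 2) + 1 = 1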
AsymmetricLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/oz/cozjbbqlonk63a5vf6mhisuyouuqlak4adjvttdtvxmglned5dg4.py
# Topologically Sorted Source Nodes: [sigmoid, clamp, log, mul, sub, sub_1, add_, clamp_, clamp_1, log_1, mul_1, add__1, mul_2, sub_2, mul_3, sub_3, mul_4, mul_5, add, pow_1, imul, sum_1, neg], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sum, aten.neg]
# Source node to ATen node mapping:
# add => add_2
# add_ => add
# add__1 => add_1
# clamp => clamp_min
# clamp_ => clamp_max
# clamp_1 => clamp_min_1
# imul => mul_6
# log => log
# log_1 => log_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# neg => neg
# pow_1 => pow_1
# sigmoid => sigmoid
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sum_1 => sum_1
# Graph fragment:
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sigmoid, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %log), kwargs = {})
# %sub : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 0.05), kwargs = {})
# %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add, 1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%clamp_max, 1e-08), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %arg0_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_2), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, %sub), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%sub_3, %add_2), kwargs = {})
# %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %pow_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_6,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp0
tmp6 = tmp1 - tmp4
tmp7 = 0.05
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp1)
tmp10 = tmp9 * tmp2
tmp11 = tmp1 - tmp5
tmp12 = tmp11 - tmp10
tmp13 = 0.0
tmp14 = tmp0 * tmp13
tmp15 = 4.0
tmp16 = tmp2 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.pow(tmp12, tmp17)
tmp19 = 1e-08
tmp20 = triton_helpers.maximum(tmp4, tmp19)
tmp21 = tl_math.log(tmp20)
tmp22 = tmp0 * tmp21
tmp23 = triton_helpers.maximum(tmp9, tmp19)
tmp24 = tl_math.log(tmp23)
tmp25 = tmp2 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp26 * tmp18
tmp28 = tl.broadcast_to(tmp27, [RBLOCK])
tmp30 = triton_helpers.promote_to_tensor(tl.sum(tmp28, 0))
tmp31 = -tmp30
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp2, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp5, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp10, None)
tl.store(out_ptr3 + (tl.broadcast_to(r0, [RBLOCK])), tmp18, None)
tl.store(out_ptr4 + (tl.broadcast_to(r0, [RBLOCK])), tmp27, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [sigmoid, clamp, log, mul, sub, sub_1, add_, clamp_, clamp_1, log_1, mul_1, add__1, mul_2, sub_2, mul_3, sub_3, mul_4, mul_5, add, pow_1, imul, sum_1, neg], Original ATen: [aten.sigmoid, aten.clamp, aten.log, aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sum, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0.run(buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf6, buf3, buf4, buf2, buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AsymmetricLoss(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=0, probability_margin=0.05,
eps=1e-08, label_smooth=0.0):
super().__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.label_smooth = label_smooth
self.clip = probability_margin
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def get_last_scale(self):
return 1.0
    def forward(self, inputs, targets):
        """
        Parameters
        ----------
        inputs: input logits
        targets: targets (multi-label binarized vector)
        """
if self.label_smooth > 0:
targets = targets * (1 - self.label_smooth)
targets[targets == 0] = self.label_smooth
self.targets = targets
self.anti_targets = 1 - targets
self.xs_pos = torch.sigmoid(inputs)
self.xs_neg = 1.0 - self.xs_pos
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
self.eps)))
if self.gamma_neg > 0 or self.gamma_pos > 0:
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
self.loss *= self.asymmetric_w
return -self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tl.sigmoid(tmp3)
tmp5 = tmp4 * tmp0
tmp6 = tmp1 - tmp4
tmp7 = 0.05
tmp8 = tmp6 + tmp7
tmp9 = triton_helpers.minimum(tmp8, tmp1)
tmp10 = tmp9 * tmp2
tmp11 = tmp1 - tmp5
tmp12 = tmp11 - tmp10
tmp13 = 0.0
tmp14 = tmp0 * tmp13
tmp15 = 4.0
tmp16 = tmp2 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = libdevice.pow(tmp12, tmp17)
tmp19 = 1e-08
tmp20 = triton_helpers.maximum(tmp4, tmp19)
tmp21 = tl_math.log(tmp20)
tmp22 = tmp0 * tmp21
tmp23 = triton_helpers.maximum(tmp9, tmp19)
tmp24 = tl_math.log(tmp23)
tmp25 = tmp2 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = tmp26 * tmp18
tmp28 = tl.broadcast_to(tmp27, [RBLOCK])
tmp30 = triton_helpers.promote_to_tensor(tl.sum(tmp28, 0))
tmp31 = -tmp30
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp2, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp10, None)
tl.store(out_ptr3 + tl.broadcast_to(r0, [RBLOCK]), tmp18, None)
tl.store(out_ptr4 + tl.broadcast_to(r0, [RBLOCK]), tmp27, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = buf5
del buf5
get_raw_stream(0)
triton_per_fused_add_clamp_log_mul_neg_pow_rsub_sigmoid_sub_sum_0[grid
(1)](buf6, arg0_1, arg1_1, buf0, buf1, buf2, buf3, buf4, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf6, buf3, buf4, buf2, buf1, buf0
class AsymmetricLossNew(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=0, probability_margin=0.05,
eps=1e-08, label_smooth=0.0):
super().__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.label_smooth = label_smooth
self.clip = probability_margin
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def get_last_scale(self):
return 1.0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| daniil-lyakhov/deep-object-reid | AsymmetricLoss | false | 1,779 | [
"Apache-2.0"
] | 0 | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | https://github.com/daniil-lyakhov/deep-object-reid/tree/b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | import torch
import torch.nn as nn
class Model(nn.Module):
""" Notice - optimized version, minimizes memory allocation and gpu uploading,
favors inplace operations"""
def __init__(self, gamma_neg=4, gamma_pos=0, probability_margin=0.05,
eps=1e-08, label_smooth=0.0):
super().__init__()
self.gamma_neg = gamma_neg
self.gamma_pos = gamma_pos
self.label_smooth = label_smooth
self.clip = probability_margin
self.eps = eps
(self.targets) = (self.anti_targets) = (self.xs_pos) = (self.xs_neg
) = (self.asymmetric_w) = (self.loss) = None
def get_last_scale(self):
return 1.0
    def forward(self, inputs, targets):
        """
        Parameters
        ----------
        inputs: input logits
        targets: targets (multi-label binarized vector)
        """
if self.label_smooth > 0:
targets = targets * (1 - self.label_smooth)
targets[targets == 0] = self.label_smooth
self.targets = targets
self.anti_targets = 1 - targets
self.xs_pos = torch.sigmoid(inputs)
self.xs_neg = 1.0 - self.xs_pos
if self.clip is not None and self.clip > 0:
self.xs_neg.add_(self.clip).clamp_(max=1)
self.loss = self.targets * torch.log(self.xs_pos.clamp(min=self.eps))
self.loss.add_(self.anti_targets * torch.log(self.xs_neg.clamp(min=
self.eps)))
if self.gamma_neg > 0 or self.gamma_pos > 0:
self.xs_pos = self.xs_pos * self.targets
self.xs_neg = self.xs_neg * self.anti_targets
self.asymmetric_w = torch.pow(1 - self.xs_pos - self.xs_neg,
self.gamma_pos * self.targets + self.gamma_neg * self.
anti_targets)
self.loss *= self.asymmetric_w
return -self.loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
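An illustrative forward pass through the asymmetric loss above, assuming the Model class from the pytorch_code field; it consumes raw logits and a multi-label target matrix, and the shapes below are chosen only for demonstration.

import torch

criterion = Model()                            # defaults: gamma_neg=4, gamma_pos=0, probability_margin=0.05
logits = torch.randn(8, 20)                    # raw classifier outputs (pre-sigmoid)
targets = (torch.rand(8, 20) > 0.5).float()    # multi-label binarized vector
loss = criterion(logits, targets)              # negated sum of asymmetrically focused log-likelihoods
print(loss.item())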
CBOW | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3r/c3rcleexr5nwq4qvwylgjpelwulrq7jjyvy54eszp24wxfm6tszs.py
# Topologically Sorted Source Nodes: [add, add_1, add_2, add_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, 0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %select_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %select_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %select_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, add_1, add_2, add_3], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CBOW(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
def forward(self, x):
x = sum([*x]).float()
x = self.fc1(x)
x = self.fc2(x)
return x
def save_model(self):
torch.save(self.state_dict(), 'checkpoints/cbow_model.pt')
def load_model(self):
self.load_state_dict(torch.load('checkpoints/cbow_model.pt',
map_location='cpu'))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf1, primals_4
class CBOWNew(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
def save_model(self):
torch.save(self.state_dict(), 'checkpoints/cbow_model.pt')
def load_model(self):
self.load_state_dict(torch.load('checkpoints/cbow_model.pt',
map_location='cpu'))
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| danny-1k/autocomplete_hist | CBOW | false | 1,780 | [
"BSD-2-Clause"
] | 0 | 0a553ea59e08f2ddca60a1f35e9cf14d43370100 | https://github.com/danny-1k/autocomplete_hist/tree/0a553ea59e08f2ddca60a1f35e9cf14d43370100 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, input_size)
def forward(self, x):
x = sum([*x]).float()
x = self.fc1(x)
x = self.fc2(x)
return x
def save_model(self):
torch.save(self.state_dict(), 'checkpoints/cbow_model.pt')
def load_model(self):
self.load_state_dict(torch.load('checkpoints/cbow_model.pt',
map_location='cpu'))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
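A small end-to-end sketch for the CBOW module above, assuming the Model class from the pytorch_code field: forward() sums the stacked context vectors along the first dimension before the two linear layers; the vocabulary size and context indices are arbitrary.

import torch

vocab_size, hidden_size = 10, 4
model = Model(vocab_size, hidden_size)
context = torch.eye(vocab_size)[torch.tensor([1, 3, 7, 2])]   # four one-hot context words, shape (4, 10)
logits = model(context)                                       # summed bag of words -> fc1 -> fc2
print(logits.shape)                                           # torch.Size([10]): one score per vocabulary entry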
GumbelSigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vf/cvfvuqk25moqakdnjwzlkkznzqalfphu6cnd2p65ajz6725xnlag.py
# Topologically Sorted Source Nodes: [mul, sigmoid], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def gumbel(x, eps=1e-20):
return -torch.log(-torch.log(torch.rand_like(x) + eps) + eps)
class GumbelSigmoid(nn.Module):
def __init__(self, scale=1.0):
super().__init__()
self.scale = float(scale)
def forward(self, logits):
y = logits + gumbel(logits) if self.training else logits
return torch.sigmoid(self.scale * y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def gumbel(x, eps=1e-20):
return -torch.log(-torch.log(torch.rand_like(x) + eps) + eps)
class GumbelSigmoidNew(nn.Module):
def __init__(self, scale=1.0):
super().__init__()
self.scale = float(scale)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| daniil-lyakhov/deep-object-reid | GumbelSigmoid | false | 1,781 | [
"Apache-2.0"
] | 0 | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | https://github.com/daniil-lyakhov/deep-object-reid/tree/b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | import torch
import torch.nn as nn
def gumbel(x, eps=1e-20):
return -torch.log(-torch.log(torch.rand_like(x) + eps) + eps)
class Model(nn.Module):
def __init__(self, scale=1.0):
super().__init__()
self.scale = float(scale)
def forward(self, logits):
y = logits + gumbel(logits) if self.training else logits
return torch.sigmoid(self.scale * y)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
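A short sketch of the Gumbel-sigmoid relaxation above, assuming the Model class from the pytorch_code field: in training mode Gumbel noise is added to the logits before the scaled sigmoid, so repeated calls give different near-binary gates, while eval mode reduces to a plain sigmoid; the scale of 5.0 is an arbitrary choice.

import torch

gate = Model(scale=5.0)
logits = torch.zeros(3, 4)
gate.train()
stochastic = gate(logits)      # noisy, near-binary gates; different on every call
gate.eval()
deterministic = gate(logits)   # exactly torch.sigmoid(5.0 * logits), i.e. all 0.5 here
print(stochastic, deterministic)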
TorchDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/p5/cp5e36tvlqnohdq5zb5zfusraebda24phs75lmdmy4l5an3pldil.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add_2, sum_2, sum_3, add, union, truediv, sub, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_2 => add_2
# intersection => sum_1
# loss => mean
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# union => add_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tmp21 = tmp20 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, ), (1, ), torch.float32)
buf3 = reinterpret_tensor(buf0, (), (), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add_2, sum_2, sum_3, add, union, truediv, sub, loss], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def soft_dice_loss(outputs, targets, per_image=False):
batch_size = outputs.size()[0]
eps = 1e-05
if not per_image:
batch_size = 1
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
intersection = torch.sum(dice_output * dice_target, dim=1)
union = torch.sum(dice_output, dim=1) + torch.sum(dice_target, dim=1) + eps
loss = (1 - (2 * intersection + eps) / union).mean()
return loss
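# Illustrative note (added for readability, not part of the original repository code):
# with per_image=False the whole batch is flattened into one vector, so a perfect
# prediction makes the intersection equal to both sums and the loss collapse to ~0,
# while fully disjoint masks push the ratio toward 0 and the loss toward 1. For example,
# soft_dice_loss(torch.ones(2, 8), torch.ones(2, 8)) evaluates to
# 1 - (2 * 16 + 1e-05) / (16 + 16 + 1e-05) == 0.0.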
class TorchDiceLoss(nn.Module):
def __init__(self, weight=None, size_average=True, per_image=False):
super().__init__()
self.size_average = size_average
self.register_buffer('weight', weight)
self.per_image = per_image
def forward(self, input, target):
return soft_dice_loss(input, target, per_image=self.per_image)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
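# Descriptive note (added for readability, not emitted by Inductor): this is a single
# persistent-reduction program over all 256 elements. It accumulates sum(pred * target),
# sum(pred) and sum(target), evaluates the scalar soft-dice expression
# 1 - (2 * intersection + 1e-05) / (sum_pred + sum_target + 1e-05), divides by 1.0
# (the mean over the single flattened batch element) and stores the result in in_out_ptr0.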
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1e-05
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = 1.0
tmp20 = tmp19 - tmp18
tmp21 = tmp20 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.float32)
buf3 = reinterpret_tensor(buf0, (), (), 0)
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mean_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
def soft_dice_loss(outputs, targets, per_image=False):
batch_size = outputs.size()[0]
eps = 1e-05
if not per_image:
batch_size = 1
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
intersection = torch.sum(dice_output * dice_target, dim=1)
union = torch.sum(dice_output, dim=1) + torch.sum(dice_target, dim=1) + eps
loss = (1 - (2 * intersection + eps) / union).mean()
return loss
class TorchDiceLossNew(nn.Module):
def __init__(self, weight=None, size_average=True, per_image=False):
super().__init__()
self.size_average = size_average
self.register_buffer('weight', weight)
self.per_image = per_image
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dannyjeck-matroid/solaris | TorchDiceLoss | false | 1,782 | [
"Apache-2.0"
] | 0 | 463d220c1fe14f811cbbbf528a7353022538006e | https://github.com/dannyjeck-matroid/solaris/tree/463d220c1fe14f811cbbbf528a7353022538006e | import torch
from torch import nn
def soft_dice_loss(outputs, targets, per_image=False):
batch_size = outputs.size()[0]
eps = 1e-05
if not per_image:
batch_size = 1
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
intersection = torch.sum(dice_output * dice_target, dim=1)
union = torch.sum(dice_output, dim=1) + torch.sum(dice_target, dim=1) + eps
loss = (1 - (2 * intersection + eps) / union).mean()
return loss
class Model(nn.Module):
def __init__(self, weight=None, size_average=True, per_image=False):
super().__init__()
self.size_average = size_average
self.register_buffer('weight', weight)
self.per_image = per_image
def forward(self, input, target):
return soft_dice_loss(input, target, per_image=self.per_image)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
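A quick way to read a row like the one above is to treat the generated `call(...)` wrapper as a drop-in replacement for the eager loss. The sketch below is illustrative only: it assumes the generated code has been saved as a module named `compiled_dice` (a hypothetical name, not part of the dataset) and that a CUDA device is available, since the kernel is compiled with `device_str='cuda'`.
import torch
from compiled_dice import call, soft_dice_loss  # hypothetical module; both names are defined in the row above
preds = torch.rand(4, 4, 4, 4, device='cuda')
targets = torch.rand(4, 4, 4, 4, device='cuda')
eager = soft_dice_loss(preds, targets)               # reference eager-mode loss
fused = call([preds.clone(), targets.clone()])[0]    # Triton persistent-reduction kernel
assert torch.allclose(eager, fused, atol=1e-05)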
Normalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/mh/cmhnzt6ukobhn6jgbkhu2rn7dqxvbqkltkwplhlgeirbz5tft3kx.py
# Topologically Sorted Source Nodes: [sub, sub_1, add, audio, mul, sub_2], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# audio => div
# mul => mul
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %unsqueeze_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.minimum(tmp1, tmp2)
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = triton_helpers.maximum(tmp1, tmp2)
tmp10 = triton_helpers.maximum(tmp9, tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = tmp11 - tmp7
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp8 / tmp14
tmp16 = 2.0
tmp17 = tmp15 * tmp16
tmp18 = 1.0
tmp19 = tmp17 - tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, sub_1, add, audio, mul, sub_2], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Normalize(nn.Module):
"""
Scale Audio to be between -1 and 1
"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, audio: 'torch.Tensor'):
if len(audio.shape) != 2:
raise ValueError('Audio should be 2D: [batch_size X audio_length]')
if audio.shape[1] < 1:
raise ValueError('Audio length is zero')
max_value = torch.max(audio, dim=1)[0].detach()
min_value = torch.min(audio, dim=1)[0].detach()
max_value = torch.unsqueeze(max_value, 1)
min_value = torch.unsqueeze(min_value, 1)
audio = (audio - min_value) / (max_value - min_value + 1e-10)
return audio * 2 - 1
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
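# Descriptive note (added for readability, not emitted by Inductor): each of the 16
# output elements reloads its own 4-element row, reduces that row to its minimum and
# maximum with pairwise min/max, and applies (x - min) / (max - min + 1e-10) * 2 - 1,
# i.e. the per-row min-max scaling from Normalize.forward fused into a single pass.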
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.minimum(tmp1, tmp2)
tmp5 = triton_helpers.minimum(tmp3, tmp4)
tmp7 = triton_helpers.minimum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = triton_helpers.maximum(tmp1, tmp2)
tmp10 = triton_helpers.maximum(tmp9, tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = tmp11 - tmp7
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp8 / tmp14
tmp16 = 2.0
tmp17 = tmp15 * tmp16
tmp18 = 1.0
tmp19 = tmp17 - tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(nn.Module):
"""
Scale Audio to be between -1 and 1
"""
def __init__(self):
super(NormalizeNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| dariocazzani/vo-id | Normalize | false | 1,783 | [
"MIT"
] | 0 | 41d0f2779e7909cfa15afcb6c8222c48a5855eb8 | https://github.com/dariocazzani/vo-id/tree/41d0f2779e7909cfa15afcb6c8222c48a5855eb8 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Scale Audio to be between -1 and 1
"""
def __init__(self):
super().__init__()
def forward(self, audio: 'torch.Tensor'):
if len(audio.shape) != 2:
raise ValueError('Audio should be 2D: [batch_size X audio_length]')
if audio.shape[1] < 1:
raise ValueError('Audio length is zero')
max_value = torch.max(audio, dim=1)[0].detach()
min_value = torch.min(audio, dim=1)[0].detach()
max_value = torch.unsqueeze(max_value, 1)
min_value = torch.unsqueeze(min_value, 1)
audio = (audio - min_value) / (max_value - min_value + 1e-10)
return audio * 2 - 1
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
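For reference, the arithmetic the fused kernel above performs per row can be reproduced with a few eager ops. This is a CPU-only sketch; it does not import the `Normalize` module from the row above and simply restates the same formula.
import torch
audio = torch.tensor([[0.0, 1.0, 2.0, 4.0]])
mn = audio.min(dim=1, keepdim=True).values
mx = audio.max(dim=1, keepdim=True).values
scaled = (audio - mn) / (mx - mn + 1e-10) * 2 - 1
print(scaled)  # approximately tensor([[-1.0000, -0.5000,  0.0000,  1.0000]])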
LCTGate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ad/cad4qvn3jifa3tm7lfx26jzvdusg7wibp76k4ovqzc7tiby33yfr.py
# Topologically Sorted Source Nodes: [x, x_1, x_2, mul], Original ATen: [aten.mean, aten.native_group_norm, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul_2
# x => mean
# x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x_2 => sigmoid
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%mean, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_per_fused_mean_mul_native_group_norm_sigmoid_0 = async_compile.triton('triton_per_fused_mean_mul_native_group_norm_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_native_group_norm_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_native_group_norm_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp16 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = 1.0
tmp8 = tmp6 / tmp7
tmp9 = tmp6 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 / tmp7
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tl.sigmoid(tmp19)
tmp21 = tmp0 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1, x_2, mul], Original ATen: [aten.mean, aten.native_group_norm, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_native_group_norm_sigmoid_0.run(buf1, primals_1, primals_2, primals_3, buf3, 16, 16, grid=grid(16), stream=stream0)
return (buf3, primals_1, primals_2, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LCTGate(nn.Module):
def __init__(self, channels, groups=16):
super(LCTGate, self).__init__()
assert channels > 0
assert groups > 0
while channels % groups != 0:
groups //= 2
self.gn = nn.GroupNorm(groups, channels, affine=True)
nn.init.ones_(self.gn.bias)
nn.init.zeros_(self.gn.weight)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.gate_activation = nn.Sigmoid()
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.gn(x)
x = self.gate_activation(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_mul_native_group_norm_sigmoid_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
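# Descriptive note (added for readability, not emitted by Inductor): each of the 16
# programs owns one (batch, channel) slot. It averages the 16 spatial values (the
# AdaptiveAvgPool2d(1) step); because every GroupNorm group here holds a single pooled
# scalar, the normalized value is exactly 0 and the gate reduces to sigmoid(gn.bias[c]).
# The program then writes input * gate for its slot.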
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp16 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tmp7 = 1.0
tmp8 = tmp6 / tmp7
tmp9 = tmp6 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp10 / tmp7
tmp12 = 1e-05
tmp13 = tmp11 + tmp12
tmp14 = libdevice.rsqrt(tmp13)
tmp15 = tmp9 * tmp14
tmp17 = tmp15 * tmp16
tmp19 = tmp17 + tmp18
tmp20 = tl.sigmoid(tmp19)
tmp21 = tmp0 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_mul_native_group_norm_sigmoid_0[grid(16)](buf1,
primals_1, primals_2, primals_3, buf3, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1
class LCTGateNew(nn.Module):
def __init__(self, channels, groups=16):
super(LCTGateNew, self).__init__()
assert channels > 0
assert groups > 0
while channels % groups != 0:
groups //= 2
self.gn = nn.GroupNorm(groups, channels, affine=True)
nn.init.ones_(self.gn.bias)
nn.init.zeros_(self.gn.weight)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.gate_activation = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.gn.weight
primals_3 = self.gn.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| daniil-lyakhov/deep-object-reid | LCTGate | false | 1,784 | [
"Apache-2.0"
] | 0 | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | https://github.com/daniil-lyakhov/deep-object-reid/tree/b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, channels, groups=16):
super().__init__()
assert channels > 0
assert groups > 0
while channels % groups != 0:
groups //= 2
self.gn = nn.GroupNorm(groups, channels, affine=True)
nn.init.ones_(self.gn.bias)
nn.init.zeros_(self.gn.weight)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.gate_activation = nn.Sigmoid()
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.gn(x)
x = self.gate_activation(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
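One detail worth noticing in the row above: with the module's initialization (gn.weight zeroed, gn.bias set to ones) and a pooled feature of spatial size 1x1, every GroupNorm group normalizes a single scalar to 0, so the gate starts out as the constant sigmoid(1) for every channel. The eager-mode sketch below is illustrative only; it rebuilds that starting gate by hand instead of importing LCTGate.
import torch
x = torch.rand(4, 4, 4, 4)
bias = torch.ones(1, 4, 1, 1)   # gn.bias after nn.init.ones_, gn.weight is all zeros
gate = torch.sigmoid(bias)      # normalized pooled value is 0, so only the bias survives
y = x * gate                    # every channel is scaled by ~0.7311 at initialization
print(gate.flatten())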
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/d5/cd52mlrkv7fg7ptxw5vaz664ocriv2bgu4unk4oe24v6nbgmv7uv.py
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# ground_o => sum_2
# intersection => sum_1
# mul => mul
# pred_o => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hj/chjrhkjo6m5kpftlvex45jdnrr3g3jrhba4nk3qs657tmtf2uepl.py
# Topologically Sorted Source Nodes: [mul_1, add_1, denominator, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# denominator => add
# f => sub
# f_1 => mean
# mul_1 => mul_1
# truediv => div
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 16.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [mul_1, add_1, denominator, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.add, aten.div, aten.rsub, aten.mean]
triton_per_fused_add_div_mean_mul_rsub_1.run(buf4, buf0, buf1, buf2, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import collections
import torch
import warnings
from typing import Optional
from typing import Union
from typing import Any
from typing import Callable
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndim < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
assert sh[dim
] == 1, 'labels should have a channel with length equals to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class DiceLoss(_Loss):
"""
Compute average Dice loss between two tensors. It can support both multi-class and multi-label tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
values added to the intersection and union components of the intersection-over-union calculation to smooth results,
respectively; these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
other_act: if you don't want to use `sigmoid` or `softmax`, use another callable function to apply
other activation layers. Defaults to ``None``; for example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
else:
target = one_hot(target, num_classes=n_pred_ch)
if not self.include_background:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `include_background=False` ignored.'
)
else:
target = target[:, 1:]
input = input[:, 1:]
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, dim=reduce_axis)
if self.squared_pred:
target = torch.pow(target, 2)
input = torch.pow(input, 2)
ground_o = torch.sum(target, dim=reduce_axis)
pred_o = torch.sum(input, dim=reduce_axis)
denominator = ground_o + pred_o
if self.jaccard:
denominator = 2.0 * (denominator - intersection)
f: 'torch.Tensor' = 1.0 - (2.0 * intersection + self.smooth_nr) / (
denominator + self.smooth_dr)
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f)
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f)
elif self.reduction == LossReduction.NONE.value:
pass
else:
raise ValueError(
f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].'
)
return f
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import collections
from typing import Optional
from typing import Union
from typing import Any
from typing import Callable
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
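# Descriptive note (added for readability, not emitted by Inductor): one program per
# (batch, class) pair, 16 in total. Each program reduces its 16 spatial elements to
# sum(target * pred), sum(target) and sum(pred) and writes them into three separate
# (4, 4) buffers; the second kernel below folds them into the mean Dice loss.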
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 2.0
tmp2 = tmp0 * tmp1
tmp3 = 1e-05
tmp4 = tmp2 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp3
tmp9 = tmp4 / tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp9
tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK])
tmp14 = tl.sum(tmp12, 1)[:, None]
tmp15 = 16.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1,
buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndim < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
assert sh[dim
] == 1, 'labels should have a channel with length equals to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class DiceLossNew(_Loss):
"""
Compute average Dice loss between two tensors. It can support both multi-class and multi-label tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
values added to the intersection and union components of the intersection-over-union calculation to smooth results,
respectively; these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size they can get
overwhelmed by the signal from the background so excluding it in such cases helps convergence.
Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False) ->None:
"""
Args:
include_background: if False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: if True, apply a sigmoid function to the prediction.
softmax: if True, apply a softmax function to the prediction.
other_act: if you don't want to use `sigmoid` or `softmax`, use another callable function to apply
other activation layers. Defaults to ``None``; for example:
`other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or not.
jaccard: compute Jaccard Index (soft IoU) instead of dice or not.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, a Dice loss value is computed independently from each item in the batch
before any `reduction`.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
self.squared_pred = squared_pred
self.jaccard = jaccard
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| danielschulz/MONAI | DiceLoss | false | 1,785 | [
"Apache-2.0"
] | 0 | 54ef6e9e700f0de3d50184c0148f953be871a58e | https://github.com/danielschulz/MONAI/tree/54ef6e9e700f0de3d50184c0148f953be871a58e | import collections
import torch
import warnings
from typing import Optional
from typing import Union
from typing import Any
from typing import Callable
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndim < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
assert sh[dim
] == 1, 'labels should have a channel with length equals to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class Model(_Loss):
"""
Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks.
Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]).
Axis N of `input` is expected to have logit predictions for each class rather than being image channels,
while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are
values added to the intersection and union components of the inter-over-union calculation to smooth results
respectively, these values should be small. The `include_background` class attribute can be set to False for
an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be
background. If the non-background segmentations are small compared to the total image size, they can get
overwhelmed by the signal from the background, so excluding it in such cases helps convergence.
Milletari, F. et al. (2016) V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation, 3DV, 2016.
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard:
'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction.
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch:
'bool'=False) ->None:
"""
Args:
include_background
# ... truncated (>4000 chars) for memory efficiency |
TorchJaccardLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/25/c25ufneom4ll6dh7ho4wez5gfep4fqatmeaw4kaoqduccprkytrj.py
# Topologically Sorted Source Nodes: [jaccard_output, eq, jaccard_target, mul, intersection, add_1, sum_2, sum_3, union, sub, add_2, jaccard_score, loss], Original ATen: [aten.sigmoid, aten.eq, aten._to_copy, aten.mul, aten.sum, aten.add, aten.sub, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# eq => eq
# intersection => sum_1
# jaccard_output => sigmoid
# jaccard_score => div
# jaccard_target => convert_element_type
# loss => sub_1
# mul => mul
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# union => add
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg0_1, 1), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %convert_element_type), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-15), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sigmoid,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %sum_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-15), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = 1.0
tmp4 = tmp2 == tmp3
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp1 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp1, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.broadcast_to(tmp5, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1e-15
tmp17 = tmp9 + tmp16
tmp18 = tmp12 + tmp15
tmp19 = tmp18 - tmp9
tmp20 = tmp19 + tmp16
tmp21 = tmp17 / tmp20
tmp22 = tmp3 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
buf4 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [jaccard_output, eq, jaccard_target, mul, intersection, add_1, sum_2, sum_3, union, sub, add_2, jaccard_score, loss], Original ATen: [aten.sigmoid, aten.eq, aten._to_copy, aten.mul, aten.sum, aten.add, aten.sub, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0.run(buf3, arg1_1, arg0_1, buf4, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class TorchJaccardLoss(torch.nn.modules.Module):
def __init__(self):
super(TorchJaccardLoss, self).__init__()
def forward(self, outputs, targets):
eps = 1e-15
jaccard_target = (targets == 1).float()
jaccard_output = torch.sigmoid(outputs)
intersection = (jaccard_output * jaccard_target).sum()
union = jaccard_output.sum() + jaccard_target.sum()
jaccard_score = (intersection + eps) / (union - intersection + eps)
self._stash_jaccard = jaccard_score
loss = 1.0 - jaccard_score
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
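# Minimal usage sketch (illustrative only, not part of the original module): the helpers
# above provide random logits and targets; the returned loss is 1 - smoothed IoU of
# sigmoid(outputs) against the binary mask (targets == 1), with eps = 1e-15 smoothing.
_criterion = TorchJaccardLoss()
_outputs, _targets = get_inputs()
_loss = _criterion(_outputs, _targets)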
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0(in_out_ptr0
, in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = 1.0
tmp4 = tmp2 == tmp3
tmp5 = tmp4.to(tl.float32)
tmp6 = tmp1 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp1, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tl.broadcast_to(tmp5, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1e-15
tmp17 = tmp9 + tmp16
tmp18 = tmp12 + tmp15
tmp19 = tmp18 - tmp9
tmp20 = tmp19 + tmp16
tmp21 = tmp17 / tmp20
tmp22 = tmp3 - tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
buf4 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_mul_rsub_sigmoid_sub_sum_0[grid(1)
](buf3, arg1_1, arg0_1, buf4, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4, buf3
class TorchJaccardLossNew(torch.nn.modules.Module):
def __init__(self):
super(TorchJaccardLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dannyjeck-matroid/solaris | TorchJaccardLoss | false | 1,786 | [
"Apache-2.0"
] | 0 | 463d220c1fe14f811cbbbf528a7353022538006e | https://github.com/dannyjeck-matroid/solaris/tree/463d220c1fe14f811cbbbf528a7353022538006e | import torch
class Model(torch.nn.modules.Module):
def __init__(self):
super().__init__()
def forward(self, outputs, targets):
eps = 1e-15
jaccard_target = (targets == 1).float()
jaccard_output = torch.sigmoid(outputs)
intersection = (jaccard_output * jaccard_target).sum()
union = jaccard_output.sum() + jaccard_target.sum()
jaccard_score = (intersection + eps) / (union - intersection + eps)
self._stash_jaccard = jaccard_score
loss = 1.0 - jaccard_score
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Normalize3D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/zd/czdj4r6wcdeivrwn6uzoijmjffp3m3fzkkhyj6jtjlm77tmlmqgo.py
# Topologically Sorted Source Nodes: [max_1, min_1, sub, sub_1, add, X_1], Original ATen: [aten.max, aten.min, aten.sub, aten.add, aten.div]
# Source node to ATen node mapping:
# X_1 => div
# add => add
# max_1 => max_1
# min_1 => min_1
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %min_1 : [num_users=1] = call_function[target=torch.ops.aten.min.dim](args = (%view, 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %unsqueeze_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
triton_per_fused_add_div_max_min_sub_0 = async_compile.triton('triton_per_fused_add_div_max_min_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_max_min_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_max_min_sub_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tl.where(xmask, tmp1, float("inf"))
tmp7 = triton_helpers.min2(tmp6, 1)[:, None]
tmp8 = tmp0 - tmp7
tmp9 = tmp4 - tmp7
tmp10 = 1e-10
tmp11 = tmp9 + tmp10
tmp12 = tmp8 / tmp11
tl.store(out_ptr2 + (r1 + (16*x0)), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, min_1, sub, sub_1, add, X_1], Original ATen: [aten.max, aten.min, aten.sub, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_max_min_sub_0.run(arg0_1, buf4, 4, 16, grid=grid(4), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Normalize3D(nn.Module):
"""
Scale Spectrogram to be between 0 and 1
"""
def __init__(self):
super(Normalize3D, self).__init__()
def forward(self, X: 'torch.Tensor'):
if len(X.shape) != 3:
raise ValueError(
'Input should be 3D: [batch_size X num_features X num_steps]')
batch_size, num_features, num_steps = X.shape
X = X.contiguous().view(batch_size, num_features * num_steps)
max_value = torch.max(X, dim=1)[0].detach()
min_value = torch.min(X, dim=1)[0].detach()
max_value = torch.unsqueeze(max_value, 1)
min_value = torch.unsqueeze(min_value, 1)
X = (X - min_value) / (max_value - min_value + 1e-10)
return X.view(batch_size, num_features, num_steps)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
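# Minimal usage sketch (illustrative only, not from the original repo): Normalize3D performs a
# per-sample min-max rescale, X' = (X - min(X)) / (max(X) - min(X) + 1e-10), where the min and
# max are taken over the flattened feature/time axes of each batch item.
_norm = Normalize3D()
_scaled = _norm(get_inputs()[0])  # every item in the batch now lies in [0, 1]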
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_max_min_sub_0(in_ptr0, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp6 = tl.where(xmask, tmp1, float('inf'))
tmp7 = triton_helpers.min2(tmp6, 1)[:, None]
tmp8 = tmp0 - tmp7
tmp9 = tmp4 - tmp7
tmp10 = 1e-10
tmp11 = tmp9 + tmp10
tmp12 = tmp8 / tmp11
tl.store(out_ptr2 + (r1 + 16 * x0), tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_max_min_sub_0[grid(4)](arg0_1, buf4, 4, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0),
class Normalize3DNew(nn.Module):
"""
Scale Spectrogram to be between 0 and 1
"""
def __init__(self):
super(Normalize3DNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| dariocazzani/vo-id | Normalize3D | false | 1,787 | [
"MIT"
] | 0 | 41d0f2779e7909cfa15afcb6c8222c48a5855eb8 | https://github.com/dariocazzani/vo-id/tree/41d0f2779e7909cfa15afcb6c8222c48a5855eb8 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Scale Spectrogram to be between 0 and 1
"""
def __init__(self):
super().__init__()
def forward(self, X: 'torch.Tensor'):
if len(X.shape) != 3:
raise ValueError(
'Input should be 3D: [batch_size X num_features X num_steps]')
batch_size, num_features, num_steps = X.shape
X = X.contiguous().view(batch_size, num_features * num_steps)
max_value = torch.max(X, dim=1)[0].detach()
min_value = torch.min(X, dim=1)[0].detach()
max_value = torch.unsqueeze(max_value, 1)
min_value = torch.unsqueeze(min_value, 1)
X = (X - min_value) / (max_value - min_value + 1e-10)
return X.view(batch_size, num_features, num_steps)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
BertPooler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/f6/cf6eq52ahc4puheryyxca2jybiwdoldurzjpl5qhir6vax7kjj3d.py
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# pooled_output => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x0 + (80*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2g/c2gw7362i2a6wsfdx2sxyywx4o6ronjg6goebvdn44w6gpjsxpbc.py
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# pooled_output => add
# pooled_output_1 => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 5, 4, 4), (80, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 5, 4, 4), (80, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.cls_position = config.mask_tokens
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, self.cls_position]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 5, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, mask_tokens=4)}]
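# Minimal usage sketch (illustrative only): the pooler picks the hidden state at index
# `config.mask_tokens` along dim 1 and passes it through Linear + Tanh.
_cfg = get_init_inputs()[1]['config']
_pooler = BertPooler(_cfg)
_pooled = _pooler(get_inputs()[0])  # shape (4, 4, 4) for the (4, 5, 4, 4) sample input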
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (64 + x0 + 80 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 5, 4, 4), (80, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2
class BertPoolerNew(nn.Module):
def __init__(self, config):
super(BertPoolerNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.cls_position = config.mask_tokens
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Alwaysproblem/examples-1 | BertPooler | false | 1,788 | [
"MIT"
] | 0 | 9754fa63ed1931489a21ac1f5b299f945e369a5c | https://github.com/Alwaysproblem/examples-1/tree/9754fa63ed1931489a21ac1f5b299f945e369a5c | from _paritybench_helpers import _mock_config
import torch
from torch import nn
import torch.onnx
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.cls_position = config.mask_tokens
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, self.cls_position]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 5, 4, 4])]
def get_init_inputs():
return []
|
CriticNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nq/cnqjufcqn3ur3s7xvlb2i747nyf24md4zaiatlwgkasynplfjstu.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (1, 64), (64, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 4096, grid=grid(4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf6, 4096, grid=grid(4096), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(buf3, (64, 64), (64, 1), 0), primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CriticNet(nn.Module):
"""Critic (Value estimator) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super().__init__()
None
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
def forward(self, state):
"""Build a network that maps state -> expected Q values."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
v = self.fc3(x)
return v.squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
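# Minimal usage sketch (illustrative only): the critic maps a state tensor through two
# ReLU-activated hidden layers and a final linear head, returning a squeezed value estimate.
_critic = CriticNet(state_size=4, action_size=4, seed=4)
_values = _critic(get_inputs()[0])  # shape (4, 4, 4) once the trailing singleton dim is squeezed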
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (64, 4), (4, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64), (64, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (1, 64), (64, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 64), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf1,
primals_2, buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 64), (64, 1), 0),
reinterpret_tensor(primals_4, (64, 64), (1, 64), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 64), (1024, 256, 64, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 64), (1024, 256, 64, 1), torch.bool
)
triton_poi_fused_relu_threshold_backward_0[grid(4096)](buf3,
primals_5, buf6, 4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 64),
(64, 1), 0), reinterpret_tensor(primals_6, (64, 1), (1, 64), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 64), (64, 1), 0), reinterpret_tensor(
buf3, (64, 64), (64, 1), 0), primals_6, buf6, primals_4, buf7
class CriticNetNew(nn.Module):
"""Critic (Value estimator) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super().__init__()
None
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| danthe42/drlnd_p2 | CriticNet | false | 1,789 | [
"MIT"
] | 0 | 693813feb7c99f3e01da583e5b67e4f8904639c4 | https://github.com/danthe42/drlnd_p2/tree/693813feb7c99f3e01da583e5b67e4f8904639c4 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Critic (Value estimator) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=64,
fc2_units=64):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super().__init__()
None
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
def forward(self, state):
"""Build a network that maps state -> expected Q values."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
v = self.fc3(x)
return v.squeeze()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SetConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/by/cbyavqfpm6cctsjn76scubidnajec26whr355vus7lq6jaaa5rcx.py
# Topologically Sorted Source Nodes: [hid_sample], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hid_sample => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_6 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/vq/cvqwwsgn7w5hog5arxcl3upsq3zp4jydsf32ey4rg27yhonfi7tj.py
# Topologically Sorted Source Nodes: [hid_sample_1, hid_sample_2, hid_sample_3], Original ATen: [aten.relu, aten.mul, aten.sum]
# Source node to ATen node mapping:
# hid_sample_1 => relu_1
# hid_sample_2 => mul
# hid_sample_3 => sum_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_1, %primals_6), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_relu_sum_1 = async_compile.triton('triton_poi_fused_mul_relu_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gw/cgwic2wf3qa56zynkuy7urfcpffc2eu5t63gjm6uarwrwg6jvks7.py
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# hid => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1, %div_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 15, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + ((16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
tmp32 = tl.full([1], 12, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tl.load(in_ptr4 + ((4*x1) + ((-8) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr5 + ((16*x1) + ((-8) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + (4 + (16*x1) + ((-8) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.load(in_ptr5 + (8 + (16*x1) + ((-8) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 + tmp38
tmp40 = tl.load(in_ptr5 + (12 + (16*x1) + ((-8) + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp34 / tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp31, tmp42, tmp43)
tmp45 = tl.where(tmp19, tmp30, tmp44)
tmp46 = tl.where(tmp4, tmp15, tmp45)
tl.store(out_ptr0 + (x2), tmp46, xmask)
''', device_str='cuda')
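# Reference sketch (added note, not Inductor output): the cat kernel above fuses
# the three mask-sum normalisations with the concatenation. Assuming pooled_* are
# the (4, 4) outputs of triton_poi_fused_mul_relu_sum_1 and mask_* the (4, 4, 4)
# masks passed in call() below, an eager-mode equivalent would be:
def _eager_cat_sketch(pooled_s, mask_s, pooled_p, mask_p, pooled_j, mask_j):
    import torch
    return torch.cat([pooled_s / mask_s.sum(dim=1),
                      pooled_p / mask_p.sum(dim=1),
                      pooled_j / mask_j.sum(dim=1)], dim=1)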
# kernel path: runs/run_shard_6/inductor_cache/bh/cbh6nn7gkahdovv3eaz57lyjbw3lw7agupcxl6g54cytbdxy5y57.py
# Topologically Sorted Source Nodes: [hid_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# hid_1 => relu_6
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_20), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
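# Reference sketch (added note, not Inductor output): this kernel applies the
# out_mlp1 bias and the ReLU in place on the (4, 4) matmul result. Eager-mode
# equivalent, with illustrative argument names:
def _eager_bias_relu_sketch(mm_out, bias):
    import torch.nn.functional as F
    return F.relu(mm_out + bias)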
# kernel path: runs/run_shard_6/inductor_cache/ub/cubdt4klg6eksq4s777k5y2toe23divpkhrifhe2wzbelcrnsvya.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_22), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
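# Reference sketch (added note, not Inductor output): the final kernel adds the
# scalar out_mlp2 bias and applies the sigmoid in place. Eager-mode equivalent:
def _eager_bias_sigmoid_sketch(mm_out, bias):
    import torch
    return torch.sigmoid(mm_out + bias)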
# kernel path: runs/run_shard_6/inductor_cache/ql/cqlkizbtc5ckgiftunxao47hductoqdvbrd2mxnieseagrgndyso.py
# Topologically Sorted Source Nodes: [hid_join_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hid_join_1 => relu_5
# Graph fragment:
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_11,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
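# Reference sketch (added note, not Inductor output): this kernel only
# materialises the boolean mask relu(x + bias) <= 0 that autograd's
# threshold_backward consumes; the activations themselves were already folded
# into triton_poi_fused_mul_relu_sum_1. Eager-mode equivalent of the stored value:
def _eager_relu_backward_mask_sketch(x, bias):
    import torch.nn.functional as F
    return F.relu(x + bias) <= 0.0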
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_19, (4, 12), (12, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (1, 4), (4, 1))
assert_size_stride(primals_22, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_sample], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf22, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_sample_1, hid_sample_2, hid_sample_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf2, primals_5, primals_6, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
del primals_7
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_8, buf20, 64, grid=grid(64), stream=stream0)
del primals_8
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_predicate_1, hid_predicate_2, hid_predicate_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf6, primals_11, primals_12, buf7, 16, grid=grid(16), stream=stream0)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf8)
del primals_13
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0); del buf8 # reuse
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_join], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf9, primals_14, buf18, 64, grid=grid(64), stream=stream0)
del primals_14
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_join_1, hid_join_2, hid_join_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf10, primals_17, primals_18, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, primals_6, buf7, primals_12, buf11, primals_18, buf12, 48, grid=grid(48), stream=stream0)
del buf11
del buf3
buf13 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf12, reinterpret_tensor(primals_19, (12, 4), (1, 12), 0), out=buf13)
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [hid_1], Original ATen: [aten.relu]
triton_poi_fused_relu_3.run(buf14, primals_20, 16, grid=grid(16), stream=stream0)
del primals_20
buf15 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf14, reinterpret_tensor(primals_21, (4, 1), (1, 4), 0), out=buf15)
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf16, primals_22, 4, grid=grid(4), stream=stream0)
del primals_22
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_join_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf10, primals_17, buf17, 64, grid=grid(64), stream=stream0)
del buf10
del primals_17
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf6, primals_11, buf19, 64, grid=grid(64), stream=stream0)
del buf6
del primals_11
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_sample_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf2, primals_5, buf21, 64, grid=grid(64), stream=stream0)
del buf2
del primals_5
return (buf16, primals_6, primals_12, primals_18, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (16, 4), (4, 1), 0), buf12, buf14, buf16, primals_21, primals_19, buf17, primals_16, buf18, buf19, primals_10, buf20, buf21, primals_4, buf22, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
import torch.nn as nn
class SetConv(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats, hid_units):
super(SetConv, self).__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, samples, predicates, joins, sample_mask,
predicate_mask, join_mask):
hid_sample = F.relu(self.sample_mlp1(samples))
hid_sample = F.relu(self.sample_mlp2(hid_sample))
hid_sample = hid_sample * sample_mask
hid_sample = torch.sum(hid_sample, dim=1, keepdim=False)
sample_norm = sample_mask.sum(1, keepdim=False)
hid_sample = hid_sample / sample_norm
hid_predicate = F.relu(self.predicate_mlp1(predicates))
hid_predicate = F.relu(self.predicate_mlp2(hid_predicate))
hid_predicate = hid_predicate * predicate_mask
hid_predicate = torch.sum(hid_predicate, dim=1, keepdim=False)
predicate_norm = predicate_mask.sum(1, keepdim=False)
hid_predicate = hid_predicate / predicate_norm
hid_join = F.relu(self.join_mlp1(joins))
hid_join = F.relu(self.join_mlp2(hid_join))
hid_join = hid_join * join_mask
hid_join = torch.sum(hid_join, dim=1, keepdim=False)
join_norm = join_mask.sum(1, keepdim=False)
hid_join = hid_join / join_norm
hid = torch.cat((hid_sample, hid_predicate, hid_join), 1)
hid = F.relu(self.out_mlp1(hid))
out = torch.sigmoid(self.out_mlp2(hid))
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4])]
def get_init_inputs():
return [[], {'sample_feats': 4, 'predicate_feats': 4, 'join_feats': 4,
'hid_units': 4}]
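# Hedged usage sketch (added, assumption): wiring the two helpers above into the
# module, the way a smoke test for this dump might do it. Runs on CPU; the
# compiled SetConvNew variant further below requires CUDA.
def _example_setconv_forward():
    init_args, init_kwargs = get_init_inputs()
    model = SetConv(*init_args, **init_kwargs)
    return model(*get_inputs())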
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = x0
    tl.full([1], 0, tl.int64)  # result unused: the x0 >= 0 lower-bound check was elided as always true
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (16 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
    tl.full([1], 12, tl.int64)  # result unused: the x0 < 12 upper-bound check was elided as always true
tmp34 = tl.load(in_ptr4 + (4 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp35 = tl.load(in_ptr5 + (16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + (4 + 16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tmp35 + tmp36
tmp38 = tl.load(in_ptr5 + (8 + 16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp37 + tmp38
tmp40 = tl.load(in_ptr5 + (12 + 16 * x1 + (-8 + x0)), tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp41 = tmp39 + tmp40
tmp42 = tmp34 / tmp41
tmp43 = tl.full(tmp42.shape, 0.0, tmp42.dtype)
tmp44 = tl.where(tmp31, tmp42, tmp43)
tmp45 = tl.where(tmp19, tmp30, tmp44)
tmp46 = tl.where(tmp4, tmp15, tmp45)
tl.store(out_ptr0 + x2, tmp46, xmask)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_19, (4, 12), (12, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (1, 4), (4, 1))
assert_size_stride(primals_22, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf2, primals_5,
primals_6, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
del primals_7
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf5,
primals_8, buf20, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf6, primals_11,
primals_12, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_15, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf8)
del primals_13
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0)
del buf8
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf9,
primals_14, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf10, primals_17,
primals_18, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 12), (12, 1), torch.float32)
triton_poi_fused_cat_2[grid(48)](buf3, primals_6, buf7, primals_12,
buf11, primals_18, buf12, 48, XBLOCK=64, num_warps=1, num_stages=1)
del buf11
del buf3
buf13 = buf7
del buf7
extern_kernels.mm(buf12, reinterpret_tensor(primals_19, (12, 4), (1,
12), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_relu_3[grid(16)](buf14, primals_20, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_20
buf15 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf14, reinterpret_tensor(primals_21, (4, 1), (1,
4), 0), out=buf15)
buf16 = buf15
del buf15
triton_poi_fused_sigmoid_4[grid(4)](buf16, primals_22, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_22
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf10,
primals_17, buf17, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del primals_17
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf6,
primals_11, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf6
del primals_11
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf2,
primals_5, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del primals_5
return (buf16, primals_6, primals_12, primals_18, reinterpret_tensor(
primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_15, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (16, 4),
(4, 1), 0), buf12, buf14, buf16, primals_21, primals_19, buf17,
primals_16, buf18, buf19, primals_10, buf20, buf21, primals_4, buf22)
class SetConvNew(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats, hid_units):
super(SetConvNew, self).__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
primals_1 = self.sample_mlp1.weight
primals_2 = self.sample_mlp1.bias
primals_4 = self.sample_mlp2.weight
primals_5 = self.sample_mlp2.bias
primals_7 = self.predicate_mlp1.weight
primals_8 = self.predicate_mlp1.bias
primals_10 = self.predicate_mlp2.weight
primals_11 = self.predicate_mlp2.bias
primals_13 = self.join_mlp1.weight
primals_14 = self.join_mlp1.bias
primals_16 = self.join_mlp2.weight
primals_17 = self.join_mlp2.bias
primals_19 = self.out_mlp1.weight
primals_20 = self.out_mlp1.bias
primals_21 = self.out_mlp2.weight
primals_22 = self.out_mlp2.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_12 = input_3
primals_15 = input_4
primals_18 = input_5
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22])
return output[0]
 danield137/deep_query_optimzation | SetConv | false | 1,790 | ["MIT"] | 0 | 01a25c966338007f15d14dea1b37e388e47bcfe3 | https://github.com/danield137/deep_query_optimzation/tree/01a25c966338007f15d14dea1b37e388e47bcfe3 | import torch
from torch.nn import functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, sample_feats, predicate_feats, join_feats, hid_units):
super().__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 3, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, samples, predicates, joins, sample_mask,
predicate_mask, join_mask):
hid_sample = F.relu(self.sample_mlp1(samples))
hid_sample = F.relu(self.sample_mlp2(hid_sample))
hid_sample = hid_sample * sample_mask
hid_sample = torch.sum(hid_sample, dim=1, keepdim=False)
sample_norm = sample_mask.sum(1, keepdim=False)
hid_sample = hid_sample / sample_norm
hid_predicate = F.relu(self.predicate_mlp1(predicates))
hid_predicate = F.relu(self.predicate_mlp2(hid_predicate))
hid_predicate = hid_predicate * predicate_mask
hid_predicate = torch.sum(hid_predicate, dim=1, keepdim=False)
predicate_norm = predicate_mask.sum(1, keepdim=False)
hid_predicate = hid_predicate / predicate_norm
hid_join = F.relu(self.join_mlp1(joins))
hid_join = F.relu(self.join_mlp2(hid_join))
hid_join = hid_join * join_mask
hid_join = torch.sum(hid_join, dim=1, keepdim=False)
join_norm = join_mask.sum(1, keepdim=False)
hid_join = hid_join / join_norm
hid = torch.cat((hid_sample, hid_predicate, hid_join), 1)
hid = F.relu(self.out_mlp1(hid))
out = torch.sigmoid(self.out_mlp2(hid))
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4])]
def get_init_inputs():
return [[], {'sample_feats': 4, 'predicate_feats': 4, 'join_feats': 4,
'hid_units': 4}]
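# Hedged usage sketch (added, assumption): the same CPU smoke test as above, but
# for this reference Model definition.
def _example_model_forward():
    init_args, init_kwargs = get_init_inputs()
    model = Model(*init_args, **init_kwargs)
    return model(*get_inputs())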
|
SetConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/by/cbyavqfpm6cctsjn76scubidnajec26whr355vus7lq6jaaa5rcx.py
# Topologically Sorted Source Nodes: [hid_sample], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hid_sample => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_8 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
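# Reference sketch (added note, not Inductor output): this kernel both rewrites
# the buffer with relu(x + bias) in place and stores the relu(x + bias) <= 0
# mask that the backward pass (threshold_backward) will consume. Eager-mode
# equivalent of the two stored values, with illustrative names:
def _eager_relu_and_mask_sketch(x, bias):
    import torch.nn.functional as F
    activated = F.relu(x + bias)
    return activated, activated <= 0.0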
# kernel path: runs/run_shard_6/inductor_cache/vq/cvqwwsgn7w5hog5arxcl3upsq3zp4jydsf32ey4rg27yhonfi7tj.py
# Topologically Sorted Source Nodes: [hid_sample_1, hid_sample_2, hid_sample_3], Original ATen: [aten.relu, aten.mul, aten.sum]
# Source node to ATen node mapping:
# hid_sample_1 => relu_1
# hid_sample_2 => mul
# hid_sample_3 => sum_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_1, %primals_6), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused_mul_relu_sum_1 = async_compile.triton('triton_poi_fused_mul_relu_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + (16*x1)), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + (16*x1)), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tl.store(out_ptr0 + (x2), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4u/c4u5itl4z4xvyyf4fxzpg772zfwlt7vw7c4fi6hnardolciyx4oo.py
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# hid => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%div, %div_1, %div_2, %div_3], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + (16*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + ((4*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + ((16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + (16*x1) + ((-4) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
tmp32 = tl.full([1], 12, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr4 + ((4*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + ((16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr5 + (4 + (16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr5 + (8 + (16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tl.load(in_ptr5 + (12 + (16*x1) + ((-8) + x0)), tmp34 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 + tmp41
tmp43 = tmp35 / tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp34, tmp43, tmp44)
tmp46 = tmp0 >= tmp32
tmp47 = tl.full([1], 16, tl.int64)
tmp48 = tmp0 < tmp47
tmp49 = tl.load(in_ptr6 + ((4*x1) + ((-12) + x0)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr7 + ((16*x1) + ((-12) + x0)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tl.load(in_ptr7 + (4 + (16*x1) + ((-12) + x0)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 + tmp51
tmp53 = tl.load(in_ptr7 + (8 + (16*x1) + ((-12) + x0)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp54 = tmp52 + tmp53
tmp55 = tl.load(in_ptr7 + (12 + (16*x1) + ((-12) + x0)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tmp54 + tmp55
tmp57 = tmp49 / tmp56
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp46, tmp57, tmp58)
tmp60 = tl.where(tmp34, tmp45, tmp59)
tmp61 = tl.where(tmp19, tmp30, tmp60)
tmp62 = tl.where(tmp4, tmp15, tmp61)
tl.store(out_ptr0 + (x2), tmp62, xmask)
''', device_str='cuda')
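# Reference sketch (added note, not Inductor output): same fusion as the
# three-way cat earlier in this file, but this variant of the model pools four
# feature sets, so the kernel concatenates four normalised (4, 4) blocks into a
# (4, 16) tensor. Eager-mode equivalent, with illustrative names:
def _eager_cat4_sketch(pooled, masks):
    # pooled: four (4, 4) tensors; masks: four (4, 4, 4) masks, in matching order
    import torch
    return torch.cat([p / m.sum(dim=1) for p, m in zip(pooled, masks)], dim=1)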
# kernel path: runs/run_shard_6/inductor_cache/bh/cbh6nn7gkahdovv3eaz57lyjbw3lw7agupcxl6g54cytbdxy5y57.py
# Topologically Sorted Source Nodes: [hid_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# hid_1 => relu_8
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_26), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_3 = async_compile.triton('triton_poi_fused_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ub/cubdt4klg6eksq4s777k5y2toe23divpkhrifhe2wzbelcrnsvya.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_28), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_4 = async_compile.triton('triton_poi_fused_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ql/cqlkizbtc5ckgiftunxao47hductoqdvbrd2mxnieseagrgndyso.py
# Topologically Sorted Source Nodes: [hid_join_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hid_join_1 => relu_7
# Graph fragment:
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_15,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_7, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4, ), (1, ))
assert_size_stride(primals_21, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_22, (4, 4), (4, 1))
assert_size_stride(primals_23, (4, ), (1, ))
assert_size_stride(primals_24, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_25, (4, 16), (16, 1))
assert_size_stride(primals_26, (4, ), (1, ))
assert_size_stride(primals_27, (1, 4), (4, 1))
assert_size_stride(primals_28, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_sample], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf28, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_sample_1, hid_sample_2, hid_sample_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf2, primals_5, primals_6, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
del primals_7
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0); del buf4 # reuse
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_8, buf26, 64, grid=grid(64), stream=stream0)
del primals_8
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_predicate_1, hid_predicate_2, hid_predicate_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf6, primals_11, primals_12, buf7, 16, grid=grid(16), stream=stream0)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf8)
del primals_13
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0); del buf8 # reuse
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate_uri], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf9, primals_14, buf24, 64, grid=grid(64), stream=stream0)
del primals_14
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_predicate_uri_1, hid_predicate_uri_2, hid_predicate_uri_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf10, primals_17, primals_18, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_21, (16, 4), (4, 1), 0), reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf12)
del primals_19
buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0); del buf12 # reuse
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_join], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf13, primals_20, buf22, 64, grid=grid(64), stream=stream0)
del primals_20
buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0), reinterpret_tensor(primals_22, (4, 4), (1, 4), 0), out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid_join_1, hid_join_2, hid_join_3], Original ATen: [aten.relu, aten.mul, aten.sum]
triton_poi_fused_mul_relu_sum_1.run(buf14, primals_23, primals_24, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [hid], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, primals_6, buf7, primals_12, buf11, primals_18, buf15, primals_24, buf16, 64, grid=grid(64), stream=stream0)
del buf11
del buf15
del buf3
buf17 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf16, reinterpret_tensor(primals_25, (16, 4), (1, 16), 0), out=buf17)
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [hid_1], Original ATen: [aten.relu]
triton_poi_fused_relu_3.run(buf18, primals_26, 16, grid=grid(16), stream=stream0)
del primals_26
buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf18, reinterpret_tensor(primals_27, (4, 1), (1, 4), 0), out=buf19)
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_4.run(buf20, primals_28, 4, grid=grid(4), stream=stream0)
del primals_28
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_join_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf14, primals_23, buf21, 64, grid=grid(64), stream=stream0)
del buf14
del primals_23
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate_uri_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf10, primals_17, buf23, 64, grid=grid(64), stream=stream0)
del buf10
del primals_17
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_predicate_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf6, primals_11, buf25, 64, grid=grid(64), stream=stream0)
del buf6
del primals_11
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hid_sample_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf2, primals_5, buf27, 64, grid=grid(64), stream=stream0)
del buf2
del primals_5
return (buf20, primals_6, primals_12, primals_18, primals_24, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (16, 4), (4, 1), 0), reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(primals_21, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (16, 4), (4, 1), 0), buf16, buf18, buf20, primals_27, primals_25, buf21, primals_22, buf22, buf23, primals_16, buf24, buf25, primals_10, buf26, buf27, primals_4, buf28, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SetConv(nn.Module):
def __init__(self, sample_feats, predicate_feats, predicate_uri_feats,
join_feats, hid_units):
super(SetConv, self).__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_uri_mlp1 = nn.Linear(predicate_uri_feats, hid_units)
self.predicate_uri_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 4, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, samples, predicates, predicates_uris, joins,
sample_mask, predicate_mask, predicate_uri_mask, join_mask):
hid_sample = F.relu(self.sample_mlp1(samples))
hid_sample = F.relu(self.sample_mlp2(hid_sample))
hid_sample = hid_sample * sample_mask
hid_sample = torch.sum(hid_sample, dim=1, keepdim=False)
sample_norm = sample_mask.sum(1, keepdim=False)
hid_sample = hid_sample / sample_norm
hid_predicate = F.relu(self.predicate_mlp1(predicates))
hid_predicate = F.relu(self.predicate_mlp2(hid_predicate))
hid_predicate = hid_predicate * predicate_mask
hid_predicate = torch.sum(hid_predicate, dim=1, keepdim=False)
predicate_norm = predicate_mask.sum(1, keepdim=False)
hid_predicate = hid_predicate / predicate_norm
hid_predicate_uri = F.relu(self.predicate_uri_mlp1(predicates_uris))
hid_predicate_uri = F.relu(self.predicate_uri_mlp2(hid_predicate_uri))
hid_predicate_uri = hid_predicate_uri * predicate_uri_mask
hid_predicate_uri = torch.sum(hid_predicate_uri, dim=1, keepdim=False)
predicate_uri_norm = predicate_uri_mask.sum(1, keepdim=False)
hid_predicate_uri = hid_predicate_uri / predicate_uri_norm
hid_join = F.relu(self.join_mlp1(joins))
hid_join = F.relu(self.join_mlp2(hid_join))
hid_join = hid_join * join_mask
hid_join = torch.sum(hid_join, dim=1, keepdim=False)
join_norm = join_mask.sum(1, keepdim=False)
hid_join = hid_join / join_norm
hid = torch.cat((hid_sample, hid_predicate, hid_predicate_uri,
hid_join), 1)
hid = F.relu(self.out_mlp1(hid))
out = torch.sigmoid(self.out_mlp2(hid))
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'sample_feats': 4, 'predicate_feats': 4,
'predicate_uri_feats': 4, 'join_feats': 4, 'hid_units': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_relu_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp10 = tl.load(in_ptr2 + (4 + x0 + 16 * x1), xmask)
tmp13 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr2 + (8 + x0 + 16 * x1), xmask)
tmp19 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp22 = tl.load(in_ptr2 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp8 = tmp7 + tmp1
tmp9 = triton_helpers.maximum(tmp3, tmp8)
tmp11 = tmp9 * tmp10
tmp12 = tmp6 + tmp11
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp17 = tmp15 * tmp16
tmp18 = tmp12 + tmp17
tmp20 = tmp19 + tmp1
tmp21 = triton_helpers.maximum(tmp3, tmp20)
tmp23 = tmp21 * tmp22
tmp24 = tmp18 + tmp23
tl.store(out_ptr0 + x2, tmp24, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (16 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl.load(in_ptr1 + (4 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.load(in_ptr1 + (8 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tmp8 + tmp9
tmp11 = tl.load(in_ptr1 + (12 + 16 * x1 + x0), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tmp5 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 8, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + (4 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tl.load(in_ptr3 + (16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp22 = tl.load(in_ptr3 + (4 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = tl.load(in_ptr3 + (8 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.load(in_ptr3 + (12 + 16 * x1 + (-4 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tmp20 / tmp27
tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype)
tmp30 = tl.where(tmp19, tmp28, tmp29)
tmp31 = tmp0 >= tmp17
tmp32 = tl.full([1], 12, tl.int64)
tmp33 = tmp0 < tmp32
tmp34 = tmp31 & tmp33
tmp35 = tl.load(in_ptr4 + (4 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp36 = tl.load(in_ptr5 + (16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr5 + (4 + 16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 + tmp37
tmp39 = tl.load(in_ptr5 + (8 + 16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp40 = tmp38 + tmp39
tmp41 = tl.load(in_ptr5 + (12 + 16 * x1 + (-8 + x0)), tmp34 & xmask,
eviction_policy='evict_last', other=0.0)
tmp42 = tmp40 + tmp41
tmp43 = tmp35 / tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp34, tmp43, tmp44)
tmp46 = tmp0 >= tmp32
tl.full([1], 16, tl.int64)
tmp49 = tl.load(in_ptr6 + (4 * x1 + (-12 + x0)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr7 + (16 * x1 + (-12 + x0)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp51 = tl.load(in_ptr7 + (4 + 16 * x1 + (-12 + x0)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp52 = tmp50 + tmp51
tmp53 = tl.load(in_ptr7 + (8 + 16 * x1 + (-12 + x0)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp54 = tmp52 + tmp53
tmp55 = tl.load(in_ptr7 + (12 + 16 * x1 + (-12 + x0)), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp56 = tmp54 + tmp55
tmp57 = tmp49 / tmp56
tmp58 = tl.full(tmp57.shape, 0.0, tmp57.dtype)
tmp59 = tl.where(tmp46, tmp57, tmp58)
tmp60 = tl.where(tmp34, tmp45, tmp59)
tmp61 = tl.where(tmp19, tmp30, tmp60)
tmp62 = tl.where(tmp4, tmp15, tmp61)
tl.store(out_ptr0 + x2, tmp62, xmask)
@triton.jit
def triton_poi_fused_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27, primals_28
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_16, (4, 4), (4, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_19, (4, 4), (4, 1))
assert_size_stride(primals_20, (4,), (1,))
assert_size_stride(primals_21, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_22, (4, 4), (4, 1))
assert_size_stride(primals_23, (4,), (1,))
assert_size_stride(primals_24, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_25, (4, 16), (16, 1))
assert_size_stride(primals_26, (4,), (1,))
assert_size_stride(primals_27, (1, 4), (4, 1))
assert_size_stride(primals_28, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf2, primals_5,
primals_6, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf4)
del primals_7
buf5 = reinterpret_tensor(buf4, (4, 4, 4), (16, 4, 1), 0)
del buf4
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf5,
primals_8, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf6, primals_11,
primals_12, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_15, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf8)
del primals_13
buf9 = reinterpret_tensor(buf8, (4, 4, 4), (16, 4, 1), 0)
del buf8
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf9,
primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf10 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_16, (4, 4), (1, 4), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf10, primals_17,
primals_18, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_21, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_19, (4, 4), (1, 4), 0), out=buf12)
del primals_19
buf13 = reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0)
del buf12
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf13,
primals_20, buf22, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_20
buf14 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf13, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_22, (4, 4), (1, 4), 0), out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_relu_sum_1[grid(16)](buf14, primals_23,
primals_24, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_cat_2[grid(64)](buf3, primals_6, buf7, primals_12,
buf11, primals_18, buf15, primals_24, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf11
del buf15
del buf3
buf17 = buf7
del buf7
extern_kernels.mm(buf16, reinterpret_tensor(primals_25, (16, 4), (1,
16), 0), out=buf17)
buf18 = buf17
del buf17
triton_poi_fused_relu_3[grid(16)](buf18, primals_26, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_26
buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf18, reinterpret_tensor(primals_27, (4, 1), (1,
4), 0), out=buf19)
buf20 = buf19
del buf19
triton_poi_fused_sigmoid_4[grid(4)](buf20, primals_28, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_28
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf14,
primals_23, buf21, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf14
del primals_23
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf10,
primals_17, buf23, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del primals_17
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf6,
primals_11, buf25, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf6
del primals_11
buf27 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf2,
primals_5, buf27, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del primals_5
return (buf20, primals_6, primals_12, primals_18, primals_24,
reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_9, (16, 4), (4, 1), 0), reinterpret_tensor(buf5, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_15, (16, 4), (4, 1), 0),
reinterpret_tensor(buf9, (16, 4), (4, 1), 0), reinterpret_tensor(
primals_21, (16, 4), (4, 1), 0), reinterpret_tensor(buf13, (16, 4),
(4, 1), 0), buf16, buf18, buf20, primals_27, primals_25, buf21,
primals_22, buf22, buf23, primals_16, buf24, buf25, primals_10,
buf26, buf27, primals_4, buf28)
class SetConvNew(nn.Module):
def __init__(self, sample_feats, predicate_feats, predicate_uri_feats,
join_feats, hid_units):
super(SetConvNew, self).__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_uri_mlp1 = nn.Linear(predicate_uri_feats, hid_units)
self.predicate_uri_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 4, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6, input_7):
primals_1 = self.sample_mlp1.weight
primals_2 = self.sample_mlp1.bias
primals_4 = self.sample_mlp2.weight
primals_5 = self.sample_mlp2.bias
primals_7 = self.predicate_mlp1.weight
primals_8 = self.predicate_mlp1.bias
primals_10 = self.predicate_mlp2.weight
primals_11 = self.predicate_mlp2.bias
primals_13 = self.predicate_uri_mlp1.weight
primals_14 = self.predicate_uri_mlp1.bias
primals_16 = self.predicate_uri_mlp2.weight
primals_17 = self.predicate_uri_mlp2.bias
primals_19 = self.join_mlp1.weight
primals_20 = self.join_mlp1.bias
primals_22 = self.join_mlp2.weight
primals_23 = self.join_mlp2.bias
primals_25 = self.out_mlp1.weight
primals_26 = self.out_mlp1.bias
primals_27 = self.out_mlp2.weight
primals_28 = self.out_mlp2.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
primals_12 = input_3
primals_15 = input_4
primals_18 = input_5
primals_21 = input_6
primals_24 = input_7
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28])
return output[0]
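# Hypothetical usage sketch (not part of the generated module): the compiled wrapper
# expects the same eight (4, 4, 4) inputs as the eager SetConv and pins all work to
# cuda:0, so a CUDA device is required. The function name below is illustrative only.
def example_compiled_setconv():
    model = SetConvNew(sample_feats=4, predicate_feats=4, predicate_uri_feats=4,
        join_feats=4, hid_units=4).cuda()
    # samples, predicates, predicate URIs, joins, then the four matching masks
    inputs = [torch.rand(4, 4, 4, device='cuda') for _ in range(8)]
    out = model(*inputs)
    assert out.shape == (4, 1)  # one sigmoid output per batch element
    return out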
| dacasals/learnedcardinalities | SetConv | false | 1,791 | [
"MIT"
] | 0 | ee9741ce1a7b55ed18c33fbd6047484e50068037 | https://github.com/dacasals/learnedcardinalities/tree/ee9741ce1a7b55ed18c33fbd6047484e50068037 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, sample_feats, predicate_feats, predicate_uri_feats,
join_feats, hid_units):
super().__init__()
self.sample_mlp1 = nn.Linear(sample_feats, hid_units)
self.sample_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_mlp1 = nn.Linear(predicate_feats, hid_units)
self.predicate_mlp2 = nn.Linear(hid_units, hid_units)
self.predicate_uri_mlp1 = nn.Linear(predicate_uri_feats, hid_units)
self.predicate_uri_mlp2 = nn.Linear(hid_units, hid_units)
self.join_mlp1 = nn.Linear(join_feats, hid_units)
self.join_mlp2 = nn.Linear(hid_units, hid_units)
self.out_mlp1 = nn.Linear(hid_units * 4, hid_units)
self.out_mlp2 = nn.Linear(hid_units, 1)
def forward(self, samples, predicates, predicates_uris, joins,
sample_mask, predicate_mask, predicate_uri_mask, join_mask):
hid_sample = F.relu(self.sample_mlp1(samples))
hid_sample = F.relu(self.sample_mlp2(hid_sample))
hid_sample = hid_sample * sample_mask
hid_sample = torch.sum(hid_sample, dim=1, keepdim=False)
sample_norm = sample_mask.sum(1, keepdim=False)
hid_sample = hid_sample / sample_norm
hid_predicate = F.relu(self.predicate_mlp1(predicates))
hid_predicate = F.relu(self.predicate_mlp2(hid_predicate))
hid_predicate = hid_predicate * predicate_mask
hid_predicate = torch.sum(hid_predicate, dim=1, keepdim=False)
predicate_norm = predicate_mask.sum(1, keepdim=False)
hid_predicate = hid_predicate / predicate_norm
hid_predicate_uri = F.relu(self.predicate_uri_mlp1(predicates_uris))
hid_predicate_uri = F.relu(self.predicate_uri_mlp2(hid_predicate_uri))
hid_predicate_uri = hid_predicate_uri * predicate_uri_mask
hid_predicate_uri = torch.sum(hid_predicate_uri, dim=1, keepdim=False)
predicate_uri_norm = predicate_uri_mask.sum(1, keepdim=False)
hid_predicate_uri = hid_predicate_uri / predicate_uri_norm
hid_join = F.relu(self.join_mlp1(joins))
hid_join = F.relu(self.join_mlp2(hid_join))
hid_join = hid_join * join_mask
hid_join = torch.sum(hid_join, dim=1, keepdim=False)
join_norm = join_mask.sum(1, keepdim=False)
hid_join = hid_join / join_norm
hid = torch.cat((hid_sample, hid_predicate, hid_predicate_uri,
hid_join), 1)
hid = F.relu(self.out_mlp1(hid))
out = torch.sigmoid(self.out_mlp2(hid))
return out
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'sample_feats': 4, 'predicate_feats': 4,
'predicate_uri_feats': 4, 'join_feats': 4, 'hid_units': 4}]
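# Hypothetical usage sketch built from the helpers above: each branch runs a two-layer
# MLP, masks and mean-pools over the set dimension, and the four pooled vectors are
# concatenated before the final sigmoid head. The function name is illustrative only.
def example_eager_forward():
    model = Model(**get_init_inputs()[1])
    out = model(*get_inputs())
    assert out.shape == (4, 1)  # one sigmoid output per batch element
    return out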
|
SoftArgmax2D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7d/c7dubrsigenrd2jmd6iwmxj4ghxpnyetwaywlk3uefyl3icppnek.py
# Topologically Sorted Source Nodes: [max_1, sub, exp_x, mul_2, sum_1, add, exp_x_sum, mul_3, expected_x, mul, mul_1, expected_y], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum, aten.add, aten.reciprocal]
# Source node to ATen node mapping:
# add => add
# exp_x => exp
# exp_x_sum => mul, reciprocal
# expected_x => sum_3
# expected_y => sum_2
# max_1 => max_1
# mul => mul_5
# mul_1 => mul_6
# mul_2 => mul_7
# mul_3 => mul_8
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, -1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem), kwargs = {})
# %exp : [num_users=3] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %exp), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-06), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %mul), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [-1], True), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %exp), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %mul), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [-1], True), kwargs = {})
triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = r1 % 4
tmp12 = tmp11.to(tl.float32)
tmp13 = 2.0
tmp14 = tmp12 < tmp13
tmp15 = 0.6666666666666666
tmp16 = tmp12 * tmp15
tmp17 = -1.0
tmp18 = tmp16 + tmp17
tmp19 = 3 + ((-1)*(r1 % 4))
tmp20 = tmp19.to(tl.float32)
tmp21 = tmp20 * tmp15
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tmp24 = tl.where(tmp14, tmp18, tmp23)
tmp25 = tmp24 * tmp6
tmp26 = 1e-06
tmp27 = tmp10 + tmp26
tmp28 = tl.full([1, 1], 1, tl.int32)
tmp29 = tmp28 / tmp27
tmp30 = tmp29 * tmp22
tmp31 = tmp25 * tmp30
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.where(xmask, tmp32, 0)
tmp35 = tl.sum(tmp34, 1)[:, None]
tmp36 = (r1 // 4)
tmp37 = tmp36.to(tl.float32)
tmp38 = tmp37 < tmp13
tmp39 = tmp37 * tmp15
tmp40 = tmp39 + tmp17
tmp41 = 3 + ((-1)*(r1 // 4))
tmp42 = tmp41.to(tl.float32)
tmp43 = tmp42 * tmp15
tmp44 = tmp22 - tmp43
tmp45 = tl.where(tmp38, tmp40, tmp44)
tmp46 = tmp45 * tmp6
tmp47 = tmp46 * tmp30
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp50 = tl.where(xmask, tmp48, 0)
tmp51 = tl.sum(tmp50, 1)[:, None]
tl.store(out_ptr2 + (2*x0), tmp35, xmask)
tl.store(out_ptr3 + (2*x0), tmp51, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf3 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 0) # alias
buf4 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 1) # alias
# Topologically Sorted Source Nodes: [max_1, sub, exp_x, mul_2, sum_1, add, exp_x_sum, mul_3, expected_x, mul, mul_1, expected_y], Original ATen: [aten.max, aten.sub, aten.exp, aten.mul, aten.sum, aten.add, aten.reciprocal]
stream0 = get_raw_stream(0)
triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0.run(arg0_1, buf3, buf4, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from typing import Optional
def create_meshgrid(x: 'torch.Tensor', normalized_coordinates: 'Optional[bool]'
) ->torch.Tensor:
assert len(x.shape) == 4, x.shape
_, _, height, width = x.shape
_device, _dtype = x.device, x.dtype
if normalized_coordinates:
xs = torch.linspace(-1.0, 1.0, width, device=_device, dtype=_dtype)
ys = torch.linspace(-1.0, 1.0, height, device=_device, dtype=_dtype)
else:
xs = torch.linspace(0, width - 1, width, device=_device, dtype=_dtype)
ys = torch.linspace(0, height - 1, height, device=_device, dtype=_dtype
)
return torch.meshgrid(ys, xs)
class SoftArgmax2D(nn.Module):
"""Creates a module that computes the Spatial Soft-Argmax 2D
of a given input heatmap.
Returns the soft-argmax (expected) 2D coordinates of the maximum of the given map.
The output order is x-coord and y-coord.
Arguments:
normalized_coordinates (Optional[bool]): whether to return the
coordinates normalized in the range of [-1, 1]. Otherwise,
it will return the coordinates in the range of the input shape.
Default is True.
Shape:
- Input: :math:`(B, N, H, W)`
- Output: :math:`(B, N, 2)`
Examples::
>>> input = torch.rand(1, 4, 2, 3)
>>> m = SoftArgmax2D()
>>> coords = m(input) # 1x4x2
>>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
"""
def __init__(self, normalized_coordinates: 'Optional[bool]'=True) ->None:
super(SoftArgmax2D, self).__init__()
self.normalized_coordinates: 'Optional[bool]' = normalized_coordinates
self.eps: 'float' = 1e-06
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
if not torch.is_tensor(input):
raise TypeError('Input input type is not a torch.Tensor. Got {}'
.format(type(input)))
if not len(input.shape) == 4:
raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'
.format(input.shape))
batch_size, channels, _height, _width = input.shape
x: 'torch.Tensor' = input.view(batch_size, channels, -1)
exp_x = torch.exp(x - torch.max(x, dim=-1, keepdim=True)[0])
exp_x_sum = 1.0 / (exp_x.sum(dim=-1, keepdim=True) + self.eps)
pos_y, pos_x = create_meshgrid(input, self.normalized_coordinates)
pos_x = pos_x.reshape(-1)
pos_y = pos_y.reshape(-1)
expected_y: 'torch.Tensor' = torch.sum(pos_y * exp_x * exp_x_sum,
dim=-1, keepdim=True)
expected_x: 'torch.Tensor' = torch.sum(pos_x * exp_x * exp_x_sum,
dim=-1, keepdim=True)
output: 'torch.Tensor' = torch.cat([expected_x, expected_y], dim=-1)
return output.view(batch_size, channels, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
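# Hypothetical sanity check (not part of the original module): a heatmap with one sharp
# peak should yield soft-argmax coordinates close to the peak's normalized position.
def example_peak_recovery():
    heatmap = torch.zeros(1, 1, 5, 5)
    heatmap[0, 0, 1, 3] = 20.0  # peak at row (y) = 1, column (x) = 3
    coords = SoftArgmax2D(normalized_coordinates=True)(heatmap)  # shape (1, 1, 2)
    # width = height = 5, so x = -1 + 2 * 3 / 4 = 0.5 and y = -1 + 2 * 1 / 4 = -0.5
    assert torch.allclose(coords, torch.tensor([[[0.5, -0.5]]]), atol=1e-3)
    return coords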
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0(in_ptr0, out_ptr2,
    out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
    # Each program handles one (batch, channel) row of 16 flattened heatmap values:
    # it forms a numerically stable softmax over the row (with a 1e-06 stabilizer in
    # the denominator), reduces it against the normalized x / y meshgrid coordinates,
    # and writes expected_x / expected_y into the interleaved (B, C, 2) output buffers.
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = r1 % 4
tmp12 = tmp11.to(tl.float32)
tmp13 = 2.0
tmp14 = tmp12 < tmp13
tmp15 = 0.6666666666666666
tmp16 = tmp12 * tmp15
tmp17 = -1.0
tmp18 = tmp16 + tmp17
tmp19 = 3 + -1 * (r1 % 4)
tmp20 = tmp19.to(tl.float32)
tmp21 = tmp20 * tmp15
tmp22 = 1.0
tmp23 = tmp22 - tmp21
tmp24 = tl.where(tmp14, tmp18, tmp23)
tmp25 = tmp24 * tmp6
tmp26 = 1e-06
tmp27 = tmp10 + tmp26
tmp28 = tl.full([1, 1], 1, tl.int32)
tmp29 = tmp28 / tmp27
tmp30 = tmp29 * tmp22
tmp31 = tmp25 * tmp30
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.where(xmask, tmp32, 0)
tmp35 = tl.sum(tmp34, 1)[:, None]
tmp36 = r1 // 4
tmp37 = tmp36.to(tl.float32)
tmp38 = tmp37 < tmp13
tmp39 = tmp37 * tmp15
tmp40 = tmp39 + tmp17
tmp41 = 3 + -1 * (r1 // 4)
tmp42 = tmp41.to(tl.float32)
tmp43 = tmp42 * tmp15
tmp44 = tmp22 - tmp43
tmp45 = tl.where(tmp38, tmp40, tmp44)
tmp46 = tmp45 * tmp6
tmp47 = tmp46 * tmp30
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp50 = tl.where(xmask, tmp48, 0)
tmp51 = tl.sum(tmp50, 1)[:, None]
tl.store(out_ptr2 + 2 * x0, tmp35, xmask)
tl.store(out_ptr3 + 2 * x0, tmp51, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf5 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
buf3 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 0)
buf4 = reinterpret_tensor(buf5, (4, 4, 1), (8, 2, 1), 1)
get_raw_stream(0)
triton_per_fused_add_exp_max_mul_reciprocal_sub_sum_0[grid(16)](arg0_1,
buf3, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
return buf5,
def create_meshgrid(x: 'torch.Tensor', normalized_coordinates: 'Optional[bool]'
) ->torch.Tensor:
assert len(x.shape) == 4, x.shape
_, _, height, width = x.shape
_device, _dtype = x.device, x.dtype
if normalized_coordinates:
xs = torch.linspace(-1.0, 1.0, width, device=_device, dtype=_dtype)
ys = torch.linspace(-1.0, 1.0, height, device=_device, dtype=_dtype)
else:
xs = torch.linspace(0, width - 1, width, device=_device, dtype=_dtype)
ys = torch.linspace(0, height - 1, height, device=_device, dtype=_dtype
)
return torch.meshgrid(ys, xs)
class SoftArgmax2DNew(nn.Module):
"""Creates a module that computes the Spatial Soft-Argmax 2D
of a given input heatmap.
Returns the soft-argmax (expected) 2D coordinates of the maximum of the given map.
The output order is x-coord and y-coord.
Arguments:
normalized_coordinates (Optional[bool]): whether to return the
coordinates normalized in the range of [-1, 1]. Otherwise,
it will return the coordinates in the range of the input shape.
Default is True.
Shape:
- Input: :math:`(B, N, H, W)`
- Output: :math:`(B, N, 2)`
Examples::
>>> input = torch.rand(1, 4, 2, 3)
>>> m = tgm.losses.SpatialSoftArgmax2d()
>>> coords = m(input) # 1x4x2
>>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
"""
def __init__(self, normalized_coordinates: 'Optional[bool]'=True) ->None:
super(SoftArgmax2DNew, self).__init__()
self.normalized_coordinates: 'Optional[bool]' = normalized_coordinates
self.eps: 'float' = 1e-06
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
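# Hypothetical consistency check (requires a CUDA device, and only the fixed 4x4x4x4
# case this kernel was compiled for): the fused kernel should agree with a plain
# softmax-weighted reduction over the normalized meshgrid coordinates.
def example_fused_vs_reference():
    heatmap = torch.rand(4, 4, 4, 4, device='cuda')
    fused = SoftArgmax2DNew()(heatmap)  # (4, 4, 2), ordered as (x, y)
    weights = torch.softmax(heatmap.view(4, 4, -1), dim=-1)
    lin = torch.linspace(-1.0, 1.0, 4, device='cuda')
    ys, xs = torch.meshgrid(lin, lin)
    ref_x = (weights * xs.reshape(-1)).sum(-1)
    ref_y = (weights * ys.reshape(-1)).sum(-1)
    assert torch.allclose(fused, torch.stack([ref_x, ref_y], dim=-1), atol=1e-4)
    return fused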
| danyayay/ynet_adaptive | SoftArgmax2D | false | 1,792 | [
"MIT"
] | 0 | f1daea6f3d5ec8a7349c2ee72bf742df83786103 | https://github.com/danyayay/ynet_adaptive/tree/f1daea6f3d5ec8a7349c2ee72bf742df83786103 | import torch
import torch.nn as nn
from typing import Optional
def create_meshgrid(x: 'torch.Tensor', normalized_coordinates: 'Optional[bool]'
) ->torch.Tensor:
assert len(x.shape) == 4, x.shape
_, _, height, width = x.shape
_device, _dtype = x.device, x.dtype
if normalized_coordinates:
xs = torch.linspace(-1.0, 1.0, width, device=_device, dtype=_dtype)
ys = torch.linspace(-1.0, 1.0, height, device=_device, dtype=_dtype)
else:
xs = torch.linspace(0, width - 1, width, device=_device, dtype=_dtype)
ys = torch.linspace(0, height - 1, height, device=_device, dtype=_dtype
)
return torch.meshgrid(ys, xs)
class Model(nn.Module):
"""Creates a module that computes the Spatial Soft-Argmax 2D
of a given input heatmap.
Returns the soft-argmax (expected) 2D coordinates of the maximum of the given map.
The output order is x-coord and y-coord.
Arguments:
normalized_coordinates (Optional[bool]): whether to return the
coordinates normalized in the range of [-1, 1]. Otherwise,
it will return the coordinates in the range of the input shape.
Default is True.
Shape:
- Input: :math:`(B, N, H, W)`
- Output: :math:`(B, N, 2)`
Examples::
>>> input = torch.rand(1, 4, 2, 3)
>>> m = Model()
>>> coords = m(input) # 1x4x2
>>> x_coord, y_coord = torch.chunk(coords, dim=-1, chunks=2)
"""
def __init__(self, normalized_coordinates: 'Optional[bool]'=True) ->None:
super().__init__()
self.normalized_coordinates: 'Optional[bool]' = normalized_coordinates
self.eps: 'float' = 1e-06
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
if not torch.is_tensor(input):
raise TypeError('Input input type is not a torch.Tensor. Got {}'
.format(type(input)))
if not len(input.shape) == 4:
raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'
.format(input.shape))
batch_size, channels, _height, _width = input.shape
x: 'torch.Tensor' = input.view(batch_size, channels, -1)
exp_x = torch.exp(x - torch.max(x, dim=-1, keepdim=True)[0])
exp_x_sum = 1.0 / (exp_x.sum(dim=-1, keepdim=True) + self.eps)
pos_y, pos_x = create_meshgrid(input, self.normalized_coordinates)
pos_x = pos_x.reshape(-1)
pos_y = pos_y.reshape(-1)
expected_y: 'torch.Tensor' = torch.sum(pos_y * exp_x * exp_x_sum,
dim=-1, keepdim=True)
expected_x: 'torch.Tensor' = torch.sum(pos_x * exp_x * exp_x_sum,
dim=-1, keepdim=True)
output: 'torch.Tensor' = torch.cat([expected_x, expected_y], dim=-1)
return output.view(batch_size, channels, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FilterNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/oy/coysefd7hs2x7ufqvotgvhtsdqfeomxrrtusr5lre2lyxoe4u3on.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x_4 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.25), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 / tmp1
tmp3 = tmp0 - tmp2
tmp4 = tmp3 / tmp1
tmp5 = tmp3 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = 0.0
tmp8 = tmp6 / tmp7
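    # (editorial note) tmp7 = 0.0 appears to be the unbiased-std divisor (n - 1)
    # for a grouped dimension that collapses to a single element under the traced
    # 4x4x4x4 input, so tmp8 is 0.0 / 0.0 = NaN, matching eager-mode x.std(dim=2)
    # over one element; the 0.25 below is calculate_gain('linear') / kernel_size.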
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 1e-10
tmp11 = tmp9 + tmp10
tmp12 = tmp3 / tmp11
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.init import calculate_gain
class FilterNorm(nn.Module):
def __init__(self, in_channels, kernel_size, filter_type, nonlinearity=
'linear', running_std=False, running_mean=False):
assert filter_type in ('spatial', 'channel')
assert in_channels >= 1
super(FilterNorm, self).__init__()
self.in_channels = in_channels
self.filter_type = filter_type
self.runing_std = running_std
self.runing_mean = running_mean
std = calculate_gain(nonlinearity) / kernel_size
if running_std:
self.std = nn.Parameter(torch.randn(in_channels * kernel_size **
2) * std, requires_grad=True)
else:
self.std = std
if running_mean:
self.mean = nn.Parameter(torch.randn(in_channels * kernel_size **
2), requires_grad=True)
def forward(self, x):
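        # (editorial note) 'spatial' mode groups the channel axis into in_channels
        # groups of kernel_size**2 predicted filter values and standardizes each
        # group (zero mean, division by std + 1e-10) before rescaling by self.std;
        # 'channel' mode standardizes each channel's flattened values instead.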
if self.filter_type == 'spatial':
b, _, h, w = x.size()
x = x.reshape(b, self.in_channels, -1, h, w)
x = x - x.mean(dim=2).reshape(b, self.in_channels, 1, h, w)
            x = x / (x.std(dim=2).reshape(b, self.in_channels, 1, h, w) + 1e-10)
x = x.reshape(b, _, h, w)
if self.runing_std:
x = x * self.std[None, :, None, None]
else:
x = x * self.std
if self.runing_mean:
x = x + self.mean[None, :, None, None]
elif self.filter_type == 'channel':
b = x.size(0)
c = self.in_channels
x = x.reshape(b, c, -1)
x = x - x.mean(dim=2).reshape(b, c, 1)
x = x / (x.std(dim=2).reshape(b, c, 1) + 1e-10)
x = x.reshape(b, -1)
if self.runing_std:
x = x * self.std[None, :]
else:
x = x * self.std
if self.runing_mean:
x = x + self.mean[None, :]
else:
            raise RuntimeError('Unsupported filter type {}'.format(
                self.filter_type))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'kernel_size': 4, 'filter_type': 'spatial'}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.init import calculate_gain
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 / tmp1
tmp3 = tmp0 - tmp2
tmp4 = tmp3 / tmp1
tmp5 = tmp3 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = 0.0
tmp8 = tmp6 / tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = 1e-10
tmp11 = tmp9 + tmp10
tmp12 = tmp3 / tmp11
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class FilterNormNew(nn.Module):
def __init__(self, in_channels, kernel_size, filter_type, nonlinearity=
'linear', running_std=False, running_mean=False):
assert filter_type in ('spatial', 'channel')
assert in_channels >= 1
super(FilterNormNew, self).__init__()
self.in_channels = in_channels
self.filter_type = filter_type
self.runing_std = running_std
self.runing_mean = running_mean
std = calculate_gain(nonlinearity) / kernel_size
if running_std:
self.std = nn.Parameter(torch.randn(in_channels * kernel_size **
2) * std, requires_grad=True)
else:
self.std = std
if running_mean:
self.mean = nn.Parameter(torch.randn(in_channels * kernel_size **
2), requires_grad=True)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| danyayay/ynet_adaptive | FilterNorm | false | 1,793 | [
"MIT"
] | 0 | f1daea6f3d5ec8a7349c2ee72bf742df83786103 | https://github.com/danyayay/ynet_adaptive/tree/f1daea6f3d5ec8a7349c2ee72bf742df83786103 | import torch
import torch.nn as nn
from torch.nn.init import calculate_gain
class Model(nn.Module):
def __init__(self, in_channels, kernel_size, filter_type, nonlinearity=
'linear', running_std=False, running_mean=False):
assert filter_type in ('spatial', 'channel')
assert in_channels >= 1
super().__init__()
self.in_channels = in_channels
self.filter_type = filter_type
self.runing_std = running_std
self.runing_mean = running_mean
std = calculate_gain(nonlinearity) / kernel_size
if running_std:
self.std = nn.Parameter(torch.randn(in_channels * kernel_size **
2) * std, requires_grad=True)
else:
self.std = std
if running_mean:
self.mean = nn.Parameter(torch.randn(in_channels * kernel_size **
2), requires_grad=True)
def forward(self, x):
if self.filter_type == 'spatial':
b, _, h, w = x.size()
x = x.reshape(b, self.in_channels, -1, h, w)
x = x - x.mean(dim=2).reshape(b, self.in_channels, 1, h, w)
            x = x / (x.std(dim=2).reshape(b, self.in_channels, 1, h, w) + 1e-10)
x = x.reshape(b, _, h, w)
if self.runing_std:
x = x * self.std[None, :, None, None]
else:
x = x * self.std
if self.runing_mean:
x = x + self.mean[None, :, None, None]
elif self.filter_type == 'channel':
b = x.size(0)
c = self.in_channels
x = x.reshape(b, c, -1)
x = x - x.mean(dim=2).reshape(b, c, 1)
x = x / (x.std(dim=2).reshape(b, c, 1) + 1e-10)
x = x.reshape(b, -1)
if self.runing_std:
x = x * self.std[None, :]
else:
x = x * self.std
if self.runing_mean:
x = x + self.mean[None, :]
else:
            raise RuntimeError('Unsupported filter type {}'.format(
                self.filter_type))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [4, 4, 'spatial']
|
TorchFocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/bm/cbmytncicnp6uqp5ykfrsev7at6sj46yxrakjch7kyhncgva7sz6.py
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul_4, mul, sub_1, neg, max_val, add_1, neg_1, exp, neg_2, sub, exp_1, add, log_, loss, loss_1], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# exp => exp
# exp_1 => exp_1
# exp_2 => exp_3
# invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3
# log_ => log
# loss => add_2
# loss_1 => mul_5
# max_val => clamp_min
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# neg => neg
# neg_1 => neg_1
# neg_2 => neg_2
# neg_3 => neg_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 2), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, 0.75), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %clamp_min), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %log), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_2), kwargs = {})
triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = tmp14 * tmp3
tmp16 = tl_math.exp(tmp15)
tmp17 = 0.75
tmp18 = tmp16 * tmp17
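    # (editorial note) 0.75 is the module's default alpha and the 2.0 reused in
    # tmp15 is the default gamma, both baked into the traced graph; tmp15/tmp16
    # compute (1 - p_t)**gamma as exp(gamma * logsigmoid(-x * (2*t - 1))).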
tmp19 = tmp0 * tmp2
tmp20 = tmp0 - tmp19
tmp21 = triton_helpers.maximum(tmp1, tmp8)
tmp22 = tmp20 + tmp21
tmp23 = -tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp1 - tmp21
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tmp29 = tmp22 + tmp28
tmp30 = tmp18 * tmp29
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ux/cuxkxzb5hxm3btyfugprzqapoovnrzr4kefisb3diyuhmymga4le.py
# Topologically Sorted Source Nodes: [sum_1, mean], Original ATen: [aten.sum, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_5, [-1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_mean_sum_1 = async_compile.triton('triton_per_fused_mean_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul_4, mul, sub_1, neg, max_val, add_1, neg_1, exp, neg_2, sub, exp_1, add, log_, loss, loss_1], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sum_1, mean], Original ATen: [aten.sum, aten.mean]
triton_per_fused_mean_sum_1.run(buf2, buf0, 1, 64, grid=grid(1), stream=stream0)
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class TorchFocalLoss(nn.Module):
"""Implementation of Focal Loss[1]_ modified from Catalyst [2]_ .
Arguments
---------
gamma : :class:`int` or :class:`float`
Focusing parameter. See [1]_ .
alpha : :class:`int` or :class:`float`
Normalization factor. See [1]_ .
References
----------
.. [1] https://arxiv.org/pdf/1708.02002.pdf
.. [2] https://catalyst-team.github.io/catalyst/
"""
def __init__(self, gamma=2, alpha=0.75):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, outputs, targets):
"""Calculate the loss function between `outputs` and `targets`.
Arguments
---------
outputs : :class:`torch.Tensor`
The output tensor from a model.
targets : :class:`torch.Tensor`
The training target.
Returns
-------
loss : :class:`torch.Variable`
The loss value.
"""
if targets.size() != outputs.size():
raise ValueError(
                f'Targets and inputs must be the same size. Got ({targets.size()}) and ({outputs.size()})'
)
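        # (editorial note) The next three lines are the numerically stable binary
        # cross-entropy with logits, x - x*t + log(1 + exp(-x)), written with
        # max_val to avoid overflow; invprobs = log(1 - p_t) via logsigmoid, so
        # exp(gamma * invprobs) = (1 - p_t)**gamma is the focal modulation factor.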
max_val = (-outputs).clamp(min=0)
log_ = ((-max_val).exp() + (-outputs - max_val).exp()).log()
loss = outputs - outputs * targets + max_val + log_
invprobs = F.logsigmoid(-outputs * (targets * 2.0 - 1.0))
loss = self.alpha * (invprobs * self.gamma).exp() * loss
return loss.sum(dim=-1).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0(
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = tmp14 * tmp3
tmp16 = tl_math.exp(tmp15)
tmp17 = 0.75
tmp18 = tmp16 * tmp17
tmp19 = tmp0 * tmp2
tmp20 = tmp0 - tmp19
tmp21 = triton_helpers.maximum(tmp1, tmp8)
tmp22 = tmp20 + tmp21
tmp23 = -tmp21
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp1 - tmp21
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tmp28 = tl_math.log(tmp27)
tmp29 = tmp22 + tmp28
tmp30 = tmp18 * tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
@triton.jit
def triton_per_fused_mean_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_exp_log_log_sigmoid_forward_mul_neg_sub_0[
grid(256)](arg1_1, arg0_1, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_mean_sum_1[grid(1)](buf2, buf0, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del buf0
return buf2,
class TorchFocalLossNew(nn.Module):
"""Implementation of Focal Loss[1]_ modified from Catalyst [2]_ .
Arguments
---------
gamma : :class:`int` or :class:`float`
Focusing parameter. See [1]_ .
alpha : :class:`int` or :class:`float`
Normalization factor. See [1]_ .
References
----------
.. [1] https://arxiv.org/pdf/1708.02002.pdf
.. [2] https://catalyst-team.github.io/catalyst/
"""
def __init__(self, gamma=2, alpha=0.75):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dannyjeck-matroid/solaris | TorchFocalLoss | false | 1,794 | [
"Apache-2.0"
] | 0 | 463d220c1fe14f811cbbbf528a7353022538006e | https://github.com/dannyjeck-matroid/solaris/tree/463d220c1fe14f811cbbbf528a7353022538006e | import torch
import torch.nn.functional as F
from torch import nn
class Model(nn.Module):
"""Implementation of Focal Loss[1]_ modified from Catalyst [2]_ .
Arguments
---------
gamma : :class:`int` or :class:`float`
Focusing parameter. See [1]_ .
alpha : :class:`int` or :class:`float`
Normalization factor. See [1]_ .
References
----------
.. [1] https://arxiv.org/pdf/1708.02002.pdf
.. [2] https://catalyst-team.github.io/catalyst/
"""
def __init__(self, gamma=2, alpha=0.75):
super().__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, outputs, targets):
"""Calculate the loss function between `outputs` and `targets`.
Arguments
---------
outputs : :class:`torch.Tensor`
The output tensor from a model.
targets : :class:`torch.Tensor`
The training target.
Returns
-------
loss : :class:`torch.Variable`
The loss value.
"""
if targets.size() != outputs.size():
raise ValueError(
                f'Targets and inputs must be the same size. Got ({targets.size()}) and ({outputs.size()})'
)
max_val = (-outputs).clamp(min=0)
log_ = ((-max_val).exp() + (-outputs - max_val).exp()).log()
loss = outputs - outputs * targets + max_val + log_
invprobs = F.logsigmoid(-outputs * (targets * 2.0 - 1.0))
loss = self.alpha * (invprobs * self.gamma).exp() * loss
return loss.sum(dim=-1).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Cauchy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/w5/cw5z25nz7ina2xh2l5o2a4djlfzwvxuymmdy5rnj6minpjjk35l7.py
# Topologically Sorted Source Nodes: [neg, r, ra, truediv, pow_1, add_1, log, error, loss], Original ATen: [aten.neg, aten.add, aten.abs, aten.div, aten.pow, aten.log, aten.mul, aten.sum]
# Source node to ATen node mapping:
# add_1 => add_1
# error => mul
# log => log
# loss => sum_1
# neg => neg
# pow_1 => pow_1
# r => add
# ra => abs_1
# truediv => div
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0 = async_compile.triton('triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp7 + tmp5
tmp9 = tl_math.log(tmp8)
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [neg, r, ra, truediv, pow_1, add_1, log, error, loss], Original ATen: [aten.neg, aten.add, aten.abs, aten.div, aten.pow, aten.log, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0.run(arg1_1, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class Cauchy(nn.Module):
def __init__(self):
super(Cauchy, self).__init__()
self.c = 1.0
def forward(self, X, Y):
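        # (editorial note) Cauchy (Lorentzian) robust loss: 0.5 * c**2 *
        # log(1 + (|x - y| / c)**2); roughly quadratic for small residuals and
        # only logarithmic for large ones, which down-weights outliers.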
r = torch.add(X, -Y)
ra = torch.abs(r)
error = 0.5 * self.c ** 2 * torch.log(1 + (ra / self.c) ** 2)
loss = torch.sum(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
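# Editorial usage sketch (assumed test-harness convention, not part of the repo):
#   init_args, init_kwargs = get_init_inputs()   # here: [], {}
#   criterion = Cauchy(*init_args, **init_kwargs)
#   loss = criterion(*get_inputs())              # scalar sum over two 4x4x4x4 tensors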
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp7 + tmp5
tmp9 = tl_math.log(tmp8)
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_div_log_mul_neg_pow_sum_0[grid(1)](arg1_1,
arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CauchyNew(nn.Module):
def __init__(self):
super(CauchyNew, self).__init__()
self.c = 1.0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| davefiorino/EDSR-PyTorch | Cauchy | false | 1,795 | [
"MIT"
] | 0 | 97ad32a09a71816a36c45d92cdb2ea7ab42ba685 | https://github.com/davefiorino/EDSR-PyTorch/tree/97ad32a09a71816a36c45d92cdb2ea7ab42ba685 | import torch
import torch.nn as nn
import torch.utils.model_zoo
class Model(nn.Module):
def __init__(self):
super().__init__()
self.c = 1.0
def forward(self, X, Y):
r = torch.add(X, -Y)
ra = torch.abs(r)
error = 0.5 * self.c ** 2 * torch.log(1 + (ra / self.c) ** 2)
loss = torch.sum(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Fair | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lw/clwzdj5o2wngjvfiji4byn4nkytgvelp3l25rhsaxdtqvcqckwrp.py
# Topologically Sorted Source Nodes: [neg, r, ra, truediv, truediv_1, add_1, log, sub, error, loss], Original ATen: [aten.neg, aten.add, aten.abs, aten.div, aten.log, aten.sub, aten.mul, aten.sum]
# Source node to ATen node mapping:
# add_1 => add_1
# error => mul
# log => log
# loss => sum_1
# neg => neg
# r => add
# ra => abs_1
# sub => sub
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %abs_1 : [num_users=2] = call_function[target=torch.ops.aten.abs.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 1.0), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0 = async_compile.triton('triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp7 = tmp6 + tmp5
tmp8 = tl_math.log(tmp7)
tmp9 = tmp6 - tmp8
tmp10 = tmp9 * tmp5
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [neg, r, ra, truediv, truediv_1, add_1, log, sub, error, loss], Original ATen: [aten.neg, aten.add, aten.abs, aten.div, aten.log, aten.sub, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0.run(arg1_1, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class Fair(nn.Module):
def __init__(self):
super(Fair, self).__init__()
self.c = 1.0
def forward(self, X, Y):
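        # (editorial note) Fair robust loss: c**2 * (|r| / c - log(1 + |r| / c));
        # approximately 0.5 * r**2 for small residuals and close to linear in |r|
        # for large ones, so it is less outlier-sensitive than plain L2.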
r = torch.add(X, -Y)
ra = torch.abs(r)
error = self.c ** 2 * (ra / self.c - torch.log(1 + ra / self.c))
loss = torch.sum(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tl_math.abs(tmp3)
tmp5 = 1.0
tmp6 = tmp4 * tmp5
tmp7 = tmp6 + tmp5
tmp8 = tl_math.log(tmp7)
tmp9 = tmp6 - tmp8
tmp10 = tmp9 * tmp5
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp13, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_div_log_mul_neg_sub_sum_0[grid(1)](arg1_1,
arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FairNew(nn.Module):
def __init__(self):
super(FairNew, self).__init__()
self.c = 1.0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| davefiorino/EDSR-PyTorch | Fair | false | 1,796 | [
"MIT"
] | 0 | 97ad32a09a71816a36c45d92cdb2ea7ab42ba685 | https://github.com/davefiorino/EDSR-PyTorch/tree/97ad32a09a71816a36c45d92cdb2ea7ab42ba685 | import torch
import torch.nn as nn
import torch.utils.model_zoo
class Model(nn.Module):
def __init__(self):
super().__init__()
self.c = 1.0
def forward(self, X, Y):
r = torch.add(X, -Y)
ra = torch.abs(r)
error = self.c ** 2 * (ra / self.c - torch.log(1 + ra / self.c))
loss = torch.sum(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Charbonnier | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/2r/c2r4vrcqezcb7b3qaarvj7x5e62di3smzewo2zzbig52lkg5xuq4.py
# Topologically Sorted Source Nodes: [neg, diff, mul, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.sqrt, aten.sum]
# Source node to ATen node mapping:
# add_1 => add_1
# diff => add
# error => sqrt
# loss => sum_1
# mul => mul
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %neg), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mul_neg_sqrt_sum_0 = async_compile.triton('triton_per_fused_add_mul_neg_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_neg_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_neg_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [neg, diff, mul, add_1, error, loss], Original ATen: [aten.neg, aten.add, aten.mul, aten.sqrt, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_neg_sqrt_sum_0.run(arg1_1, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.model_zoo
class Charbonnier(nn.Module):
def __init__(self):
super(Charbonnier, self).__init__()
self.eps = 1e-06
def forward(self, X, Y):
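        # (editorial note) Charbonnier penalty: sqrt(diff**2 + eps), a smooth
        # approximation of L1 whose gradient stays finite at diff == 0 thanks to
        # eps = 1e-06; the result is summed (not averaged) over all elements.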
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps)
loss = torch.sum(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.model_zoo
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_neg_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = -tmp1
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp5 = 1e-06
tmp6 = tmp4 + tmp5
tmp7 = libdevice.sqrt(tmp6)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_neg_sqrt_sum_0[grid(1)](arg1_1, arg0_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CharbonnierNew(nn.Module):
def __init__(self):
super(CharbonnierNew, self).__init__()
self.eps = 1e-06
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| davefiorino/EDSR-PyTorch | Charbonnier | false | 1,797 | [
"MIT"
] | 0 | 97ad32a09a71816a36c45d92cdb2ea7ab42ba685 | https://github.com/davefiorino/EDSR-PyTorch/tree/97ad32a09a71816a36c45d92cdb2ea7ab42ba685 | import torch
import torch.nn as nn
import torch.utils.model_zoo
class Model(nn.Module):
def __init__(self):
super().__init__()
self.eps = 1e-06
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps)
loss = torch.sum(error)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Classifier | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/o5/co552kg3v4kw54gdfeudwyvihkmby4stltb5nwlp6sgowl67zdjv.py
# Topologically Sorted Source Nodes: [sigmoid, sent_scores], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# sent_scores => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, sent_scores], Original ATen: [aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(buf1, primals_4, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.distributed
import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, hidden_size):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask_cls):
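        # Score each position with the 1-unit linear head, squash with a sigmoid, then
        # multiply by mask_cls so masked-out positions (e.g. padding) score exactly 0.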
h = self.linear1(x).squeeze(-1)
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
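# Fused sigmoid + mask multiply: in_ptr0 holds the 64 scores from the addmm (linear head) and
# in_ptr1 the 256-element mask; x0 = xindex % 64 broadcasts each score across the leading
# dimension of the mask, mirroring how the squeezed scores broadcast against mask_cls.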
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_1
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](buf1, primals_4, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1
class ClassifierNew(nn.Module):
def __init__(self, hidden_size):
super(ClassifierNew, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0, input_1):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| dat821168/PreSumm | Classifier | false | 1,798 | [
"MIT"
] | 0 | 3c84fc97f50a193a865ccef2300adf5683397539 | https://github.com/dat821168/PreSumm/tree/3c84fc97f50a193a865ccef2300adf5683397539 | import torch
import torch.distributed
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask_cls):
h = self.linear1(x).squeeze(-1)
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
CustomBatchNormManualModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/t4/ct4t2fmlrmfxptdiid3i6pgnapm2liftmal2stemtgxuxoou5pcw.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean, aten.std, aten.mul, aten.add, aten.sqrt, aten.sub, aten.div]
# Source node to ATen node mapping:
# out => add, add_1, div, mean, mul, mul_1, sqrt, sqrt_1, sub, var
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0]), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 0.0})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %sqrt), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp22 / tmp9
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tmp24 * tmp24
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp11 / tmp28
tmp30 = tmp0 * tmp29
tmp32 = tmp30 + tmp31
tl.store(in_out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean, aten.std, aten.mul, aten.add, aten.sqrt, aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0.run(buf1, primals_2, primals_1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
TODO:
Implement the forward pass of batch normalization
Store constant non-tensor objects via ctx.constant=myconstant
Store tensors which you need in the backward pass via ctx.save_for_backward(tensor1, tensor2, ...)
Intermediate results can be decided to be either recomputed in the backward pass or to be stored
for the backward pass. Do not store tensors which are unnecessary for the backward pass to save memory!
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
x = input
mu = x.mean(dim=0)
std = x.std(dim=0, unbiased=False)
var = torch.sqrt(std * std + eps)
x_centered = x - mu
x_hat = x_centered / var
out = gamma * x_hat + beta
ctx.save_for_backward(x, mu, std, var, gamma)
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
TODO:
Retrieve saved tensors and constants via ctx.saved_tensors and ctx.constant
Compute gradients for inputs where ctx.needs_input_grad[idx] is True. Set gradients for other
inputs to None. This should be decided dynamically.
"""
x, mu, _std, var, gamma = ctx.saved_tensors
grad_input, grad_gamma, grad_beta = None, None, None
x_hat = (x - mu) / var
if ctx.needs_input_grad[2]:
grad_beta = grad_output.sum(0)
if ctx.needs_input_grad[1]:
grad_gamma = (grad_output * x_hat).sum(0)
if ctx.needs_input_grad[0]:
c1 = grad_output.mean(dim=0)
c2 = (grad_output * x_hat).mean(dim=0)
grad_input = gamma / var * (grad_output - c1 - x_hat * c2)
return grad_input, grad_gamma, grad_beta, None
class CustomBatchNormManualModule(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super(CustomBatchNormManualModule, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gammas = torch.nn.Parameter(torch.ones(n_neurons))
self.betas = torch.nn.Parameter(torch.zeros(n_neurons))
def forward(self, input):
"""
Compute the batch normalization via CustomBatchNormManualFunction
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
TODO:
Check for the correctness of the shape of the input tensor.
Instantiate a CustomBatchNormManualFunction.
Call it via its .apply() method.
"""
shape = input.size()
if len(shape) == 1:
            input = input.view(1, -1)
shape = input.size()
if len(shape) > 2:
raise ValueError('Expected 1-D or 2-D tensor (got {})'.format(
str(shape)))
elif input.shape[1] != self.n_neurons:
raise ValueError('Expected _ x {} tensor (got {} x {})'.format(
str(self.n_neurons), str(shape[0]), str(shape[1])))
fct = CustomBatchNormManualFunction()
out = fct.apply(input, self.gammas, self.betas, self.eps)
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_neurons': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
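# Single fused kernel for the whole (4, 4) batch-norm forward: for each of the 4 features it
# recomputes the batch mean (tmp10) and biased variance (tmp23) from the four rows, then
# writes gamma * (x - mean) / sqrt(variance + eps) + beta for every element.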
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = tmp22 / tmp9
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tmp24 * tmp24
tmp26 = 1e-05
tmp27 = tmp25 + tmp26
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp11 / tmp28
tmp30 = tmp0 * tmp29
tmp32 = tmp30 + tmp31
tl.store(in_out_ptr0 + x2, tmp32, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0[grid(16)](buf1,
primals_2, primals_1, primals_3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
return buf1, primals_1
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
TODO:
Implement the forward pass of batch normalization
Store constant non-tensor objects via ctx.constant=myconstant
Store tensors which you need in the backward pass via ctx.save_for_backward(tensor1, tensor2, ...)
Intermediate results can be decided to be either recomputed in the backward pass or to be stored
for the backward pass. Do not store tensors which are unnecessary for the backward pass to save memory!
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
x = input
mu = x.mean(dim=0)
std = x.std(dim=0, unbiased=False)
var = torch.sqrt(std * std + eps)
x_centered = x - mu
x_hat = x_centered / var
out = gamma * x_hat + beta
ctx.save_for_backward(x, mu, std, var, gamma)
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
TODO:
Retrieve saved tensors and constants via ctx.saved_tensors and ctx.constant
Compute gradients for inputs where ctx.needs_input_grad[idx] is True. Set gradients for other
inputs to None. This should be decided dynamically.
"""
x, mu, _std, var, gamma = ctx.saved_tensors
grad_input, grad_gamma, grad_beta = None, None, None
x_hat = (x - mu) / var
if ctx.needs_input_grad[2]:
grad_beta = grad_output.sum(0)
if ctx.needs_input_grad[1]:
grad_gamma = (grad_output * x_hat).sum(0)
if ctx.needs_input_grad[0]:
c1 = grad_output.mean(dim=0)
c2 = (grad_output * x_hat).mean(dim=0)
grad_input = gamma / var * (grad_output - c1 - x_hat * c2)
return grad_input, grad_gamma, grad_beta, None
class CustomBatchNormManualModuleNew(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super(CustomBatchNormManualModuleNew, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gammas = torch.nn.Parameter(torch.ones(n_neurons))
self.betas = torch.nn.Parameter(torch.zeros(n_neurons))
def forward(self, input_0):
primals_2 = self.gammas
primals_3 = self.betas
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| davide-belli/deep-learning-labs | CustomBatchNormManualModule | false | 1,799 | [
"MIT"
] | 0 | 1acd37a527711dccdc00c1935724cc5de7c10955 | https://github.com/davide-belli/deep-learning-labs/tree/1acd37a527711dccdc00c1935724cc5de7c10955 | import torch
import torch.nn as nn
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
TODO:
Implement the forward pass of batch normalization
Store constant non-tensor objects via ctx.constant=myconstant
Store tensors which you need in the backward pass via ctx.save_for_backward(tensor1, tensor2, ...)
Intermediate results can be decided to be either recomputed in the backward pass or to be stored
for the backward pass. Do not store tensors which are unnecessary for the backward pass to save memory!
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
x = input
mu = x.mean(dim=0)
std = x.std(dim=0, unbiased=False)
var = torch.sqrt(std * std + eps)
x_centered = x - mu
x_hat = x_centered / var
out = gamma * x_hat + beta
ctx.save_for_backward(x, mu, std, var, gamma)
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
whether tensors need gradients in backward pass
Returns:
out: tuple containing gradients for all input arguments
TODO:
Retrieve saved tensors and constants via ctx.saved_tensors and ctx.constant
Compute gradients for inputs where ctx.needs_input_grad[idx] is True. Set gradients for other
inputs to None. This should be decided dynamically.
"""
x, mu, _std, var, gamma = ctx.saved_tensors
grad_input, grad_gamma, grad_beta = None, None, None
x_hat = (x - mu) / var
if ctx.needs_input_grad[2]:
grad_beta = grad_output.sum(0)
if ctx.needs_input_grad[1]:
grad_gamma = (grad_output * x_hat).sum(0)
if ctx.needs_input_grad[0]:
c1 = grad_output.mean(dim=0)
c2 = (grad_output * x_hat).mean(dim=0)
grad_input = gamma / var * (grad_output - c1 - x_hat * c2)
return grad_input, grad_gamma, grad_beta, None
class Model(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomB
# ... truncated (>4000 chars) for memory efficiency |
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/qz/cqzqewrslhmmtznbubdptcl3g644vt5nx5zmpemvecw7yxy4owit.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu => gt, mul, where
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_2, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/x3/cx3lvsfjgyomafwjbqsn4akqyj6ctr5w75lrwgb7kifdqfrbvs23.py
# Topologically Sorted Source Nodes: [leaky_relu_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# leaky_relu_1 => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_5), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/b7/cb7iq44xucvx4o4uio3etz5hrrkllxx5igr3vjyglpwcku6mi232.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# out_3 => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (512, 784), (784, 1))
assert_size_stride(primals_3, (512, ), (1, ))
assert_size_stride(primals_4, (256, 512), (512, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (1, 256), (256, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 512), (1, 784), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 2048, grid=grid(2048), stream=stream0)
del buf0
del primals_3
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (512, 256), (1, 512), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [leaky_relu_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 1024, grid=grid(1024), stream=stream0)
del buf3
del primals_5
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (256, 1), (1, 256), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf7, primals_7, 4, grid=grid(4), stream=stream0)
del primals_7
return (buf7, primals_1, buf1, buf2, buf4, buf5, buf7, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.linear1 = nn.Linear(784, 512)
self.lrelu2 = nn.LeakyReLU(0.2)
self.linear2 = nn.Linear(512, 256)
self.lrelu3 = nn.LeakyReLU(0.2)
self.linear3 = nn.Linear(256, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, img):
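        # Flattens each image to a 784-vector (presumably 1x28x28, MNIST-style) before the
        # 784 -> 512 -> 256 -> 1 MLP with LeakyReLU(0.2) activations and a final sigmoid.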
out = self.linear1(img.view(img.shape[0], 784))
out = self.linear2(self.lrelu2(out))
out = self.linear3(self.lrelu3(out))
out = self.sigmoid(out)
return out
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
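# The two leaky_relu kernels below fuse the linear bias add with LeakyReLU(0.2), emitting both
# the activated values (out_ptr1) and the x > 0 mask (out_ptr0); the mask buffers are returned
# from call() so the generated backward can reuse them instead of recomputing the comparison.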
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (512, 784), (784, 1))
assert_size_stride(primals_3, (512,), (1,))
assert_size_stride(primals_4, (256, 512), (512, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (1, 256), (256, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
512), (1, 784), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf2 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(2048)](buf0, primals_3, buf1,
buf2, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_3
buf3 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (512, 256), (
1, 512), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(1024)](buf3, primals_5, buf4,
buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (256, 1), (1,
256), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_2[grid(4)](buf7, primals_7, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_7
return buf7, primals_1, buf1, buf2, buf4, buf5, buf7, primals_6, primals_4
class DiscriminatorNew(nn.Module):
def __init__(self):
super(DiscriminatorNew, self).__init__()
self.linear1 = nn.Linear(784, 512)
self.lrelu2 = nn.LeakyReLU(0.2)
self.linear2 = nn.Linear(512, 256)
self.lrelu3 = nn.LeakyReLU(0.2)
self.linear3 = nn.Linear(256, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| davide-belli/deep-learning-labs | Discriminator | false | 1,800 | [
"MIT"
] | 0 | 1acd37a527711dccdc00c1935724cc5de7c10955 | https://github.com/davide-belli/deep-learning-labs/tree/1acd37a527711dccdc00c1935724cc5de7c10955 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(784, 512)
self.lrelu2 = nn.LeakyReLU(0.2)
self.linear2 = nn.Linear(512, 256)
self.lrelu3 = nn.LeakyReLU(0.2)
self.linear3 = nn.Linear(256, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, img):
out = self.linear1(img.view(img.shape[0], 784))
out = self.linear2(self.lrelu2(out))
out = self.linear3(self.lrelu3(out))
out = self.sigmoid(out)
return out
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
|
ConcatPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dy/cdydayzef2fxcnqukm4em4cj2latyjpphw5luwkneuqlp4m5b7zg.py
# Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# max_pool2d => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%arg0_1, [4, 4], [4, 4], [0, 0], [1, 1], False), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4], [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (16*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 + tmp0
tmp32 = tmp3 + tmp31
tmp33 = tmp5 + tmp32
tmp34 = tmp7 + tmp33
tmp35 = tmp9 + tmp34
tmp36 = tmp11 + tmp35
tmp37 = tmp13 + tmp36
tmp38 = tmp15 + tmp37
tmp39 = tmp17 + tmp38
tmp40 = tmp19 + tmp39
tmp41 = tmp21 + tmp40
tmp42 = tmp23 + tmp41
tmp43 = tmp25 + tmp42
tmp44 = tmp27 + tmp43
tmp45 = tmp29 + tmp44
tmp46 = 0.0625
tmp47 = tmp45 * tmp46
tl.store(out_ptr0 + (x0 + (8*x1)), tmp30, xmask)
tl.store(out_ptr1 + (x0 + (8*x1)), tmp47, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 0) # alias
buf1 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 4) # alias
# Topologically Sorted Source Nodes: [max_pool2d, avg_pool2d], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0.run(arg0_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConcatPool2d(nn.Module):
"""Layer that concats `AvgPool2d` and `MaxPool2d`"""
def __init__(self, ks, stride=None, padding=0):
super().__init__()
self.ap = nn.AvgPool2d(ks, stride, padding)
self.mp = nn.MaxPool2d(ks, stride, padding)
def forward(self, x):
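        # The channel dimension doubles: the first half of dim 1 is max-pooled,
        # the second half is average-pooled.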
return torch.cat([self.mp(x), self.ap(x)], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ks': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
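# Each program instance reduces one full 4x4 spatial window of the (4, 4, 4, 4) input: the
# running maximum (tmp30) is written into the first 4 channels of the 8-channel output and the
# window mean (sum * 0.0625) into the last 4, fusing the max/avg pooling pair and the channel
# concat into a single kernel that writes straight into the shared output buffer.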
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + 16 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x2), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 + tmp0
tmp32 = tmp3 + tmp31
tmp33 = tmp5 + tmp32
tmp34 = tmp7 + tmp33
tmp35 = tmp9 + tmp34
tmp36 = tmp11 + tmp35
tmp37 = tmp13 + tmp36
tmp38 = tmp15 + tmp37
tmp39 = tmp17 + tmp38
tmp40 = tmp19 + tmp39
tmp41 = tmp21 + tmp40
tmp42 = tmp23 + tmp41
tmp43 = tmp25 + tmp42
tmp44 = tmp27 + tmp43
tmp45 = tmp29 + tmp44
tmp46 = 0.0625
tmp47 = tmp45 * tmp46
tl.store(out_ptr0 + (x0 + 8 * x1), tmp30, xmask)
tl.store(out_ptr1 + (x0 + 8 * x1), tmp47, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.float32)
buf0 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 0)
buf1 = reinterpret_tensor(buf2, (4, 4, 1, 1), (8, 1, 1, 1), 4)
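        # buf0 and buf1 alias the first and second channel halves of buf2, so the kernel writes the concatenated [max, avg] result in place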
get_raw_stream(0)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_0[grid(16)](arg0_1,
buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf2,
class ConcatPool2dNew(nn.Module):
"""Layer that concats `AvgPool2d` and `MaxPool2d`"""
def __init__(self, ks, stride=None, padding=0):
super().__init__()
self.ap = nn.AvgPool2d(ks, stride, padding)
self.mp = nn.MaxPool2d(ks, stride, padding)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| davidleonfdez/face2anime | ConcatPool2d | false | 1,801 | [
"MIT"
] | 0 | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | https://github.com/davidleonfdez/face2anime/tree/896bf85a7aa28322cc9e9e586685db8cbbf39d89 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Layer that concats `AvgPool2d` and `MaxPool2d`"""
def __init__(self, ks, stride=None, padding=0):
super().__init__()
self.ap = nn.AvgPool2d(ks, stride, padding)
self.mp = nn.MaxPool2d(ks, stride, padding)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
CustomBatchNormAutograd | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/fy/cfyu3frphog3iitdcz5tgawpqhvmxx3pgmb3qcnek46yuarja7tl.py
# Topologically Sorted Source Nodes: [mu, std, sub, mul, add, sqrt, x_hat, mul_1, out], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mu => mean
# mul => mul
# mul_1 => mul_1
# out => add_1
# sqrt => sqrt_1
# std => sqrt, var
# sub => sub
# x_hat => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0]), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 1.0})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sqrt, %sqrt), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp25 * tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.sqrt(tmp28)
tmp30 = tmp11 / tmp29
tmp31 = tmp0 * tmp30
tmp33 = tmp31 + tmp32
tl.store(in_out_ptr0 + (x2), tmp33, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mu, std, sub, mul, add, sqrt, x_hat, mul_1, out], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0.run(buf1, primals_2, primals_1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
return (buf1, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomBatchNormAutograd(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super(CustomBatchNormAutograd, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gammas = torch.nn.Parameter(torch.ones(n_neurons))
self.betas = torch.nn.Parameter(torch.zeros(n_neurons))
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
TODO:
Check for the correctness of the shape of the input tensor.
Implement batch normalization forward pass as given in the assignment.
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
shape = input.size()
if len(shape) == 1:
            input = input.view(1, -1)  # reshape a 1-D input to (1, n_neurons) so the checks below apply
shape = input.size()
if len(shape) > 2:
raise ValueError('Expected 1-D or 2-D tensor (got {})'.format(
str(shape)))
elif input.shape[1] != self.n_neurons:
raise ValueError('Expected _ x {} tensor (got {} x {})'.format(
str(self.n_neurons), str(shape[0]), str(shape[1])))
x = input
mu = x.mean(dim=0)
std = x.std(dim=0)
x_hat = (x - mu) / torch.sqrt(std * std + self.eps)
out = self.gammas * x_hat + self.betas
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n_neurons': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
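    # mean over the batch dimension (4 samples) of this feature column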
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
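    # unbiased std: divide the summed squared deviations by N - 1 = 3, then take the square root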
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp25 * tmp25
tmp27 = 1e-05
tmp28 = tmp26 + tmp27
tmp29 = libdevice.sqrt(tmp28)
tmp30 = tmp11 / tmp29
tmp31 = tmp0 * tmp30
tmp33 = tmp31 + tmp32
tl.store(in_out_ptr0 + x2, tmp33, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
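        # buf1 reuses buf0's storage; the fused kernel fills it with the batch-normalized output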
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_sqrt_std_sub_0[grid(16)](buf1,
primals_2, primals_1, primals_3, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
return buf1, primals_1
class CustomBatchNormAutogradNew(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super(CustomBatchNormAutogradNew, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gammas = torch.nn.Parameter(torch.ones(n_neurons))
self.betas = torch.nn.Parameter(torch.zeros(n_neurons))
def forward(self, input_0):
primals_2 = self.gammas
primals_3 = self.betas
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| davide-belli/deep-learning-labs | CustomBatchNormAutograd | false | 1,802 | [
"MIT"
] | 0 | 1acd37a527711dccdc00c1935724cc5de7c10955 | https://github.com/davide-belli/deep-learning-labs/tree/1acd37a527711dccdc00c1935724cc5de7c10955 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
TODO:
Save parameters for the number of neurons and eps.
Initialize parameters gamma and beta via nn.Parameter
"""
super().__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gammas = torch.nn.Parameter(torch.ones(n_neurons))
self.betas = torch.nn.Parameter(torch.zeros(n_neurons))
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
TODO:
Check for the correctness of the shape of the input tensor.
Implement batch normalization forward pass as given in the assignment.
For the case that you make use of torch.var be aware that the flag unbiased=False should be set.
"""
shape = input.size()
if len(shape) == 1:
            input = input.view(1, -1)  # reshape a 1-D input to (1, n_neurons) so the checks below apply
shape = input.size()
if len(shape) > 2:
raise ValueError('Expected 1-D or 2-D tensor (got {})'.format(
str(shape)))
elif input.shape[1] != self.n_neurons:
raise ValueError('Expected _ x {} tensor (got {} x {})'.format(
str(self.n_neurons), str(shape[0]), str(shape[1])))
x = input
mu = x.mean(dim=0)
std = x.std(dim=0)
x_hat = (x - mu) / torch.sqrt(std * std + self.eps)
out = self.gammas * x_hat + self.betas
return out
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
PixelWise | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lg/clgum5lgqb2igszc5bgmtzzepcvym6hp73vttrcwjtey34a6skhy.py
# Topologically Sorted Source Nodes: [pow_1, mean, add, pow_2, div], Original ATen: [aten.pow, aten.mean, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# div => div
# mean => mean
# pow_1 => pow_1
# pow_2 => pow_2
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %pow_2), kwargs = {})
triton_poi_fused_add_div_mean_pow_0 = async_compile.triton('triton_poi_fused_add_div_mean_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mean, add, pow_2, div], Original ATen: [aten.pow, aten.mean, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
class PixelWise(torch.nn.Module):
""" Implemented - https://arxiv.org/pdf/1710.10196.pdf """
def __init__(self, eps=1e-08):
super(PixelWise, self).__init__()
self.eps = eps
def forward(self, tensor):
return tensor.div(tensor.pow(2).mean(1, True).add(self.eps).pow(0.5))
def __repr__(self):
return 'pixelwise'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
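    # root-mean-square over the 4 channels at this spatial position (pixelwise normalization)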
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp0 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_pow_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class PixelWiseNew(torch.nn.Module):
""" Implemented - https://arxiv.org/pdf/1710.10196.pdf """
def __init__(self, eps=1e-08):
super(PixelWiseNew, self).__init__()
self.eps = eps
def __repr__(self):
return 'pixelwise'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| davidwagnerkc/TensorMONK | PixelWise | false | 1,803 | [
"MIT"
] | 0 | 3607836d3d6bfd0994e044536b2a51bc84b35f31 | https://github.com/davidwagnerkc/TensorMONK/tree/3607836d3d6bfd0994e044536b2a51bc84b35f31 | import torch
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
class Model(torch.nn.Module):
""" Implemented - https://arxiv.org/pdf/1710.10196.pdf """
def __init__(self, eps=1e-08):
super().__init__()
self.eps = eps
def forward(self, tensor):
return tensor.div(tensor.pow(2).mean(1, True).add(self.eps).pow(0.5))
def __repr__(self):
return 'pixelwise'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PositionwiseFeedForward | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7b/c7bsbu5nqjwno7oolhruidochm2rierdki7fkzahol2dvs7dgv5t.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/62/c622us6etqvroftqelxdgdedtcxkzuvbkchqjjtjl3nrhqvihz22.py
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add_2
# add_1 => add_3
# mul => mul_2
# mul_1 => mul_3
# mul_2 => mul_4
# mul_3 => mul_5
# pow_1 => pow_1
# tanh => tanh
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.7978845608028654), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_4,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_3), kwargs = {})
triton_poi_fused_add_mul_pow_tanh_2 = async_compile.triton('triton_poi_fused_add_mul_pow_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_tanh_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5q/c5qmnkuxezgezseizmolw3mx24fyy6xp3cfoz3egpqwcprxgwjre.py
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_2 => add_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.pow, aten.add, aten.tanh]
triton_poi_fused_add_mul_pow_tanh_2.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [add_2], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_7, primals_3, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.distributed
import torch
import torch.nn as nn
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.distributed
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
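    # per-row mean and rsqrt(var + eps) over the last dimension (size 4) for LayerNorm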
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_pow_tanh_2(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
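    # tanh-approximation GELU: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))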
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp7 * tmp8
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_3, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_1
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_pow_tanh_2[grid(256)](buf3, buf4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
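        # fused add of the w_2 bias and the residual input (output + x), accumulated in place into buf6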
triton_poi_fused_add_3[grid(256)](buf6, primals_7, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_3, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, primals_4
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class PositionwiseFeedForwardNew(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForwardNew, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, input_0):
        # map module parameters to the primals order used by call(): layer_norm affine first, then w_1, then w_2
        primals_1 = self.layer_norm.weight
        primals_2 = self.layer_norm.bias
        primals_4 = self.w_1.weight
        primals_5 = self.w_1.bias
        primals_6 = self.w_2.weight
        primals_7 = self.w_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| dat821168/PreSumm | PositionwiseFeedForward | false | 1,804 | [
"MIT"
] | 0 | 3c84fc97f50a193a865ccef2300adf5683397539 | https://github.com/dat821168/PreSumm/tree/3c84fc97f50a193a865ccef2300adf5683397539 | import math
import torch
import torch.distributed
import torch
import torch.nn as nn
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Model(nn.Module):
""" A two-layer Feed-Forward-Network with residual layer norm.
Args:
d_model (int): the size of input for the first-layer of the FFN.
d_ff (int): the hidden layer size of the second-layer
            of the FFN.
dropout (float): dropout probability in :math:`[0, 1)`.
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
self.actv = gelu
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x):
inter = self.dropout_1(self.actv(self.w_1(self.layer_norm(x))))
output = self.dropout_2(self.w_2(inter))
return output + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MiniBatchStdDev | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/i5/ci54xolmm45ehhgn7i3wnusq6ipkt4djovd2pbdbym4v7mdjazyo.py
# Topologically Sorted Source Nodes: [stds_by_chw, mean_std, cat], Original ATen: [aten.std, aten.mean, aten.cat]
# Source node to ATen node mapping:
# cat => cat
# mean_std => mean
# stds_by_chw => sqrt, var
# Graph fragment:
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [1]), kwargs = {correction: 0.0})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sqrt, [1, 2, 3], True), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1], 1), kwargs = {})
triton_per_fused_cat_mean_std_0 = async_compile.triton('triton_per_fused_cat_mean_std_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_std_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_mean_std_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 64.0
tmp26 = tmp24 / tmp25
tl.store(out_ptr1 + (tl.broadcast_to(r1 + (80*r2), [XBLOCK, RBLOCK])), tmp26, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yi/cyidf2yj3fms5jdxlfe7fdijzfj6p5a5q2qxo4llkuxnpqh6fj5o.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %view_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (80*x1)), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias
# Topologically Sorted Source Nodes: [stds_by_chw, mean_std, cat], Original ATen: [aten.std, aten.mean, aten.cat]
stream0 = get_raw_stream(0)
triton_per_fused_cat_mean_std_0.run(arg0_1, buf2, 1, 64, grid=grid(1), stream=stream0)
buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(arg0_1, buf1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MiniBatchStdDev(nn.Module):
"""Layer that appends to every element of a batch a new ftr map containing the std of its group."""
def __init__(self, group_sz=4, unbiased_std=False):
super().__init__()
self.group_sz = group_sz
self.unbiased_std = unbiased_std
def forward(self, x):
bs, n_ch, h, w = x.shape
x_groups = x.view(-1, self.group_sz, n_ch, h, w)
stds_by_chw = x_groups.std(dim=1, unbiased=self.unbiased_std)
mean_std = stds_by_chw.mean(dim=[1, 2, 3], keepdim=True)
new_ftr_map = mean_std.unsqueeze(-1).repeat(1, self.group_sz, 1, h, w
).view(bs, 1, h, w)
return torch.cat([x, new_ftr_map], axis=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
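# Illustrative usage sketch (added for clarity, not part of the original repo): with
# batch size 4 and group_sz=4 there is exactly one group, so the appended feature map
# should equal the mean over (C, H, W) of the per-position std taken across the batch.
def _minibatch_stddev_demo():
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4)
    layer = MiniBatchStdDev(group_sz=4, unbiased_std=False)
    out = layer(x)
    assert out.shape == (4, 5, 4, 4)  # one extra feature map per element
    expected = x.std(dim=0, unbiased=False).mean()  # scalar shared by the whole group
    assert torch.allclose(out[:, 4], expected.expand_as(out[:, 4]), atol=1e-6)
    return out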
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_cat_mean_std_0(in_ptr0, out_ptr1, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
    # The bare expressions in this prologue reproduce the index/mask computations of the
    # annotated kernel above but discard results that are never needed: the kernel runs
    # as a single program, so no x or r mask is required and only rindex is used below.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
r1 = rindex % 16
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 64.0
tmp26 = tmp24 / tmp25
tl.store(out_ptr1 + tl.broadcast_to(r1 + 80 * r2, [XBLOCK, RBLOCK]),
tmp26, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 80 * x1), tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf3, (4, 1, 4, 4), (80, 16, 4, 1), 64)
get_raw_stream(0)
triton_per_fused_cat_mean_std_0[grid(1)](arg0_1, buf2, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
buf1 = reinterpret_tensor(buf3, (4, 4, 4, 4), (80, 16, 4, 1), 0)
triton_poi_fused_cat_1[grid(256)](arg0_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf3,
class MiniBatchStdDevNew(nn.Module):
"""Layer that appends to every element of a batch a new ftr map containing the std of its group."""
def __init__(self, group_sz=4, unbiased_std=False):
super().__init__()
self.group_sz = group_sz
self.unbiased_std = unbiased_std
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
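# Hedged sanity-check sketch (assumes a CUDA device is available; not part of the
# generated output above): runs the Triton-backed module and compares the appended
# channel against the same statistic computed with plain PyTorch ops.
def _check_minibatch_stddev_kernels():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = MiniBatchStdDevNew()(x)
    reference = x.std(dim=0, unbiased=False).mean()
    assert out.shape == (4, 5, 4, 4)
    assert torch.allclose(out[:, :4], x)  # original channels are copied through unchanged
    assert torch.allclose(out[:, 4], reference.expand_as(out[:, 4]), atol=1e-5)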
| davidleonfdez/face2anime | MiniBatchStdDev | false | 1,805 | [
"MIT"
] | 0 | 896bf85a7aa28322cc9e9e586685db8cbbf39d89 | https://github.com/davidleonfdez/face2anime/tree/896bf85a7aa28322cc9e9e586685db8cbbf39d89 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Layer that appends to every element of a batch a new ftr map containing the std of its group."""
def __init__(self, group_sz=4, unbiased_std=False):
super().__init__()
self.group_sz = group_sz
self.unbiased_std = unbiased_std
def forward(self, x):
bs, n_ch, h, w = x.shape
x_groups = x.view(-1, self.group_sz, n_ch, h, w)
stds_by_chw = x_groups.std(dim=1, unbiased=self.unbiased_std)
mean_std = stds_by_chw.mean(dim=[1, 2, 3], keepdim=True)
new_ftr_map = mean_std.unsqueeze(-1).repeat(1, self.group_sz, 1, h, w
).view(bs, 1, h, w)
return torch.cat([x, new_ftr_map], axis=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vb/cvbxdocetk5tj67qrxlvwxnmwf3z2efb37af24dzpjl5kw5bkmq5.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/q6/cq6z6xhbr2gyiw5id4lceemepctrgo25eo5gvvxlgbncy77qxm4p.py
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# mul => mul
# softmax => amax, clone, exp, sub, sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, 1.0), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_mul_1 = async_compile.triton('triton_poi_fused__softmax_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (3*x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp15, xmask)
tl.store(out_ptr2 + (x4), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qh/cqhxytgzmsgejzq2rbt7fdr5j2twqslr3w2cz4yhih2lbgvx6yly.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, clone, div, exp, sub, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/uq/cuqseg2vikrs3ttioriunrdhmp6cffp4xwpf2b6xdz7w3atftosj.py
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (3*x1) + (12*x0) + (48*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sz/csz5twnkhe2zz3eaafwmgidmezpgvs3favuuva73uud6rs7ouhj6.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_5), kwargs = {})
triton_poi_fused_add_5 = async_compile.triton('triton_poi_fused_add_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf0, primals_2, buf2, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_1.run(buf0, primals_2, buf2, buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf1, buf2, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf0, primals_2, buf6, 64, grid=grid(64), stream=stream0)
del buf0
del primals_2
buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf7, buf8, 16, 4, grid=grid(16, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_5.run(buf10, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
return (buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch as th
import torch.nn as nn
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
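# Hedged reference sketch (added for illustration, not from the original repo): the
# einsum-based attention above is equivalent to explicit per-head matmuls, and the
# 1/sqrt(sqrt(attn_ch)) scaling applied to both q and k matches the usual
# 1/sqrt(attn_ch) scaling of the logits.
def _qkv_attention_reference(qkv, n_heads):
    bs, n_ctx, width = qkv.shape
    attn_ch = width // n_heads // 3
    q, k, v = th.split(qkv.view(bs, n_ctx, n_heads, -1), attn_ch, dim=-1)
    q, k, v = (t.permute(0, 2, 1, 3) for t in (q, k, v))  # (bs, heads, n_ctx, attn_ch)
    weight = th.softmax(q @ k.transpose(-1, -2) / math.sqrt(attn_ch), dim=-1)
    return (weight @ v).permute(0, 2, 1, 3).reshape(bs, n_ctx, -1)

def _attention_demo():
    torch.manual_seed(0)
    qkv = torch.rand(4, 4, 12)  # what c_qkv produces for width=4, heads=4
    attn = QKVMultiheadAttention(n_heads=4, n_ctx=4)
    assert torch.allclose(attn(qkv), _qkv_attention_reference(qkv, 4), atol=1e-6)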
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch as th
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp15, xmask)
tl.store(out_ptr2 + x4, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (12, 4), (4, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 12), (1, 4), 0), out=buf0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf0, primals_2, buf2, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
triton_poi_fused__softmax_mul_1[grid(64)](buf0, primals_2, buf2,
buf1, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(256)](buf1, buf2, buf3, buf4, buf5,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf4
triton_poi_fused_clone_3[grid(64)](buf0, primals_2, buf6, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
buf7 = reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0)
del buf9
triton_poi_fused_add_5[grid(64)](buf10, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
return buf10, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(buf1, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0
), reinterpret_tensor(buf2, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0
), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0
), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0)
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttentionNew(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, input_0):
primals_1 = self.c_qkv.weight
primals_2 = self.c_qkv.bias
primals_4 = self.c_proj.weight
primals_5 = self.c_proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
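# Hedged sanity-check sketch (assumes a CUDA device; not part of the generated output
# above): the Triton-lowered forward should match the eager path built from the same
# submodules of MultiheadAttentionNew.
def _check_multihead_attention_kernels():
    torch.manual_seed(0)
    m = MultiheadAttentionNew(n_ctx=4, width=4, heads=4).cuda()
    x = torch.rand(4, 4, 4, device='cuda')
    eager = m.c_proj(m.attention(m.c_qkv(x)))
    assert torch.allclose(m(x), eager, atol=1e-5)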
| dashstander/glide-text2im | MultiheadAttention | false | 1,806 | [
"MIT"
] | 0 | 58f03a871ee0567e27fccc40df98203e675a9b8e | https://github.com/dashstander/glide-text2im/tree/58f03a871ee0567e27fccc40df98203e675a9b8e | import math
import torch
import torch as th
import torch.nn as nn
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class Model(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Q_Index | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ge/cgev3sqyqdjuthmmnhser34xcrhqixpsu56hzfsiksyixfgxpzls.py
# Topologically Sorted Source Nodes: [mul, mean_inp_times_tar, input_mean, target_mean, input_var, target_var], Original ATen: [aten.mul, aten.mean, aten.var]
# Source node to ATen node mapping:
# input_mean => mean
# input_var => var
# mean_inp_times_tar => mean_2
# mul => mul
# target_mean => mean_1
# target_var => var_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [-1]), kwargs = {})
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
# %mean_1 : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view_1, [-1]), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [-1]), kwargs = {correction: 1})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_1, [-1]), kwargs = {correction: 1})
triton_per_fused_mean_mul_var_0 = async_compile.triton('triton_per_fused_mean_mul_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 9, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_var_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp7 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp25 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp27 = tl.where(xmask, tmp25, 0)
tmp28 = tl.sum(tmp27, 1)[:, None]
tmp30 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp32 = tl.where(xmask, tmp30, 0)
tmp33 = tl.sum(tmp32, 1)[:, None]
tmp34 = tmp33 / tmp17
tmp35 = tmp25 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.where(xmask, tmp37, 0)
tmp40 = tl.sum(tmp39, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp24, xmask)
tl.store(out_ptr3 + (x0), tmp28, xmask)
tl.store(out_ptr4 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/s4/cs4cskd3kptvlbliq62ju3c55objjn6yexnbjpgm6wlcf6x2gpve.py
# Topologically Sorted Source Nodes: [mul, mean_inp_times_tar, input_mean, target_mean, mul_1, covariance, mul_2, mul_3, mul_4, input_var, target_var, add, pow_1, pow_2, add_1, mul_5, Q, Q_1], Original ATen: [aten.mul, aten.mean, aten.sub, aten.var, aten.add, aten.pow, aten.div]
# Source node to ATen node mapping:
# Q => div
# Q_1 => mean_3
# add => add
# add_1 => add_1
# covariance => sub
# input_mean => mean
# input_var => var
# mean_inp_times_tar => mean_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# pow_1 => pow_1
# pow_2 => pow_2
# target_mean => mean_1
# target_var => var_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mul, [-1]), kwargs = {})
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
# %mean_1 : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%view_1, [-1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, %mean_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean_2, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %mean), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %mean_1), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view, [-1]), kwargs = {correction: 1})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%view_1, [-1]), kwargs = {correction: 1})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, %var_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, %mul_5), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused_add_div_mean_mul_pow_sub_var_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_pow_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_pow_sub_var_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sub_var_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp13 = tl.load(in_ptr3 + (r0), None)
tmp16 = tl.load(in_ptr4 + (r0), None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp6 = tmp5 / tmp1
tmp7 = tmp4 * tmp6
tmp8 = tmp2 - tmp7
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tmp11 = tmp10 * tmp4
tmp12 = tmp11 * tmp6
tmp14 = 63.0
tmp15 = tmp13 / tmp14
tmp17 = tmp16 / tmp14
tmp18 = tmp15 + tmp17
tmp19 = tmp4 * tmp4
tmp20 = tmp6 * tmp6
tmp21 = tmp19 + tmp20
tmp22 = tmp18 * tmp21
tmp23 = tmp12 / tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = tmp26 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp27, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf7 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, mean_inp_times_tar, input_mean, target_mean, input_var, target_var], Original ATen: [aten.mul, aten.mean, aten.var]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_var_0.run(arg0_1, arg1_1, buf0, buf1, buf4, buf2, buf7, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf9 = empty_strided_cuda((), (), torch.float32)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [mul, mean_inp_times_tar, input_mean, target_mean, mul_1, covariance, mul_2, mul_3, mul_4, input_var, target_var, add, pow_1, pow_2, add_1, mul_5, Q, Q_1], Original ATen: [aten.mul, aten.mean, aten.sub, aten.var, aten.add, aten.pow, aten.div]
triton_per_fused_add_div_mean_mul_pow_sub_var_1.run(buf10, buf0, buf1, buf2, buf4, buf7, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
del buf4
del buf7
return (buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Q_Index(nn.Module):
"""
    Quality measurement between a perturbed image (an image with noise applied) and the denoised target image.
    This module works only for images with a single color channel (grayscale).
"""
def __init__(self):
super().__init__()
def forward(self, input, target):
batch_size = input.shape[0]
input = input.view(batch_size, -1)
target = target.view(batch_size, -1)
input_mean = input.mean(dim=-1)
target_mean = target.mean(dim=-1)
input_var = input.var(dim=-1)
target_var = target.var(dim=-1)
mean_inp_times_tar = torch.mean(input * target, dim=-1)
covariance = mean_inp_times_tar - input_mean * target_mean
Q = 4.0 * covariance * input_mean * target_mean / ((input_var +
target_var) * (input_mean ** 2 + target_mean ** 2))
Q = Q.mean()
return Q
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
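# Worked example (added for illustration, not part of the original repo): for identical
# inputs the per-image value reduces to covariance / variance. Because the covariance
# above uses the biased estimator E[xy] - E[x]E[y] while .var() defaults to the
# unbiased one, Q(x, x) evaluates to (N - 1) / N, with N the number of pixels per image.
def _q_index_identity_demo():
    torch.manual_seed(0)
    x = torch.rand(4, 1, 8, 8)  # N = 1 * 8 * 8 = 64 pixels per image
    q = Q_Index()(x, x)
    expected = torch.tensor(63.0 / 64.0)
    assert torch.allclose(q, expected, atol=1e-4)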
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_var_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp12 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tmp16 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp17 = tmp16.to(tl.float32)
tmp18 = tmp15 / tmp17
tmp19 = tmp7 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp25 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp27 = tl.where(xmask, tmp25, 0)
tmp28 = tl.sum(tmp27, 1)[:, None]
tmp30 = tl.broadcast_to(tmp25, [XBLOCK, RBLOCK])
tmp32 = tl.where(xmask, tmp30, 0)
tmp33 = tl.sum(tmp32, 1)[:, None]
tmp34 = tmp33 / tmp17
tmp35 = tmp25 - tmp34
tmp36 = tmp35 * tmp35
tmp37 = tl.broadcast_to(tmp36, [XBLOCK, RBLOCK])
tmp39 = tl.where(xmask, tmp37, 0)
tmp40 = tl.sum(tmp39, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp24, xmask)
tl.store(out_ptr3 + x0, tmp28, xmask)
tl.store(out_ptr4 + x0, tmp40, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_pow_sub_var_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp13 = tl.load(in_ptr3 + r0, None)
tmp16 = tl.load(in_ptr4 + r0, None)
tmp1 = 64.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp6 = tmp5 / tmp1
tmp7 = tmp4 * tmp6
tmp8 = tmp2 - tmp7
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tmp11 = tmp10 * tmp4
tmp12 = tmp11 * tmp6
tmp14 = 63.0
tmp15 = tmp13 / tmp14
tmp17 = tmp16 / tmp14
tmp18 = tmp15 + tmp17
tmp19 = tmp4 * tmp4
tmp20 = tmp6 * tmp6
tmp21 = tmp19 + tmp20
tmp22 = tmp18 * tmp21
tmp23 = tmp12 / tmp22
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = tmp26 / tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf4 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
buf7 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_mul_var_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf4, buf2, buf7, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf9 = empty_strided_cuda((), (), torch.float32)
buf10 = buf9
del buf9
triton_per_fused_add_div_mean_mul_pow_sub_var_1[grid(1)](buf10,
buf0, buf1, buf2, buf4, buf7, 1, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del buf0
del buf1
del buf2
del buf4
del buf7
return buf10,
class Q_IndexNew(nn.Module):
"""
Quality measurement between perturbated (image with applied noise) and denoised target image.
This module works only for images with a single color channel (grayscale)
"""
def __init__(self):
super().__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dawnofthedebayan/MedAI_Project | Q_Index | false | 1,807 | [
"Apache-2.0"
] | 0 | a7f2597c96569662f1ca9d21ffd0eb41c77211c1 | https://github.com/dawnofthedebayan/MedAI_Project/tree/a7f2597c96569662f1ca9d21ffd0eb41c77211c1 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Quality measurement between a perturbed image (an image with added noise) and the denoised target image.
This module works only for images with a single color channel (grayscale)
"""
def __init__(self):
super().__init__()
def forward(self, input, target):
batch_size = input.shape[0]
input = input.view(batch_size, -1)
target = target.view(batch_size, -1)
input_mean = input.mean(dim=-1)
target_mean = target.mean(dim=-1)
input_var = input.var(dim=-1)
target_var = target.var(dim=-1)
mean_inp_times_tar = torch.mean(input * target, dim=-1)
covariance = mean_inp_times_tar - input_mean * target_mean
Q = 4.0 * covariance * input_mean * target_mean / ((input_var +
target_var) * (input_mean ** 2 + target_mean ** 2))
Q = Q.mean()
return Q
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
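# Hedged usage sketch (added for illustration; not part of the original repo).
# The forward above computes the Universal Quality Index
#     Q = 4 * cov(x, y) * mu_x * mu_y / ((var_x + var_y) * (mu_x ** 2 + mu_y ** 2)),
# averaged over the batch. For identical inputs Q is close to, but not exactly,
# 1.0: .var() is unbiased (divides by N - 1) while the covariance term uses the
# biased mean, so with 64 elements per sample the result is about 63 / 64.
if __name__ == '__main__':
    torch.manual_seed(0)
    x = torch.rand(4, 4, 4, 4)
    q_same = Model()(x, x.clone())
    q_noisy = Model()(x, x + 0.1 * torch.randn_like(x))
    print(float(q_same), float(q_noisy))  # q_noisy is expected to be lower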
|
myNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/r3/cr3dj3iqo4mxp3jap6oqywrnhpoxhtxzi373qtxftjspbz3o3pb3.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 1000
x2 = xindex % 4000
x3 = (xindex // 4000)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (4096*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qw/cqwdvuc66lglzux6l2qprkpkyfvgktk37ixovgb2zaonbhwfr275.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (1000, 4), (4, 1))
assert_size_stride(primals_2, (1000, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 1000), (1000, 1))
assert_size_stride(primals_5, (100, ), (1, ))
assert_size_stride(primals_6, (10, 100), (100, 1))
assert_size_stride(primals_7, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 64000, grid=grid(64000), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), reinterpret_tensor(primals_4, (1000, 100), (1, 1000), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100), (100, 1), 0), reinterpret_tensor(primals_6, (100, 10), (1, 100), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 10), (160, 40, 10, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0), reinterpret_tensor(buf3, (64, 100), (100, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1000, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((100, 1000), (1000, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((10, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class myNet(nn.Module):
def __init__(self, in_features, num_classes=10):
super(myNet, self).__init__()
self.fc1 = nn.Linear(in_features, 1000)
self.fc2 = nn.Linear(1000, 100)
self.fc3 = nn.Linear(100, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
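# Hedged usage sketch (illustrative only; not from the original repo): myNet is
# a plain 4 -> 1000 -> 100 -> num_classes MLP whose Linear layers act on the
# last dimension, so a (4, 4, 4, 4) input yields a (4, 4, 4, 10) output -- the
# same shape the compiled wrapper exposes via reinterpret_tensor.
if __name__ == '__main__':
    net = myNet(in_features=4)
    out = net(torch.rand(4, 4, 4, 4))
    print(out.shape)  # torch.Size([4, 4, 4, 10])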
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 1000
x2 = xindex % 4000
x3 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 4096 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (1000, 4), (4, 1))
assert_size_stride(primals_2, (1000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (100, 1000), (1000, 1))
assert_size_stride(primals_5, (100,), (1,))
assert_size_stride(primals_6, (10, 100), (100, 1))
assert_size_stride(primals_7, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1000), (16000, 4000, 1000,
1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 1000), (16384, 4096, 1000, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64000)](buf1,
primals_2, buf6, 64000, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0
), reinterpret_tensor(primals_4, (1000, 100), (1, 1000), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(6400)](buf3,
primals_5, buf5, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 100),
(100, 1), 0), reinterpret_tensor(primals_6, (100, 10), (1, 100),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 10), (160, 40, 10, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 1000), (1000, 1), 0
), reinterpret_tensor(buf3, (64, 100), (100, 1), 0
), primals_6, buf5, primals_4, buf6
class myNetNew(nn.Module):
def __init__(self, in_features, num_classes=10):
super(myNetNew, self).__init__()
self.fc1 = nn.Linear(in_features, 1000)
self.fc2 = nn.Linear(1000, 100)
self.fc3 = nn.Linear(100, num_classes)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| daxiongpro/pytorch-tutorial | myNet | false | 1,808 | [
"MIT"
] | 0 | abafc32f7ee1092024085f703e4ced51ce358a1b | https://github.com/daxiongpro/pytorch-tutorial/tree/abafc32f7ee1092024085f703e4ced51ce358a1b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_features, num_classes=10):
super().__init__()
self.fc1 = nn.Linear(in_features, 1000)
self.fc2 = nn.Linear(1000, 100)
self.fc3 = nn.Linear(100, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.fc1(x)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ConvolutionTranspose | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/h4/ch4rzcuftzo2pej6tmqv5vqbhg2q6vtsthjwqr4ug5xxb2o56m4q.py
# Topologically Sorted Source Nodes: [tensor_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# tensor_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
# Topologically Sorted Source Nodes: [tensor_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, 144, grid=grid(144), stream=stream0)
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
def Normalizations(tensor_size=None, normalization=None, available=False,
**kwargs):
"""Does normalization on 4D tensor.
Args:
tensor_size: shape of tensor in BCHW
(None/any integer >0, channels, height, width)
normalization: None/batch/group/instance/layer/pixelwise
available: if True, returns all available normalization methods
        groups: for group (GroupNorm); when not provided, groups is set to the
            middle divisor of the channel count - e.g. for tensor_size[1] = 128
            groups is 16, since the possible values are [1, 2, 4, 8, 16, 32, 64, 128]
affine: for group and instance normalization, default False
elementwise_affine: for layer normalization. default True
"""
list_available = ['batch', 'group', 'instance', 'layer', 'pixelwise']
if available:
return list_available
normalization = normalization.lower()
assert normalization in list_available, 'Normalization must be None/' + '/'.join(
list_available)
if normalization == 'batch':
return torch.nn.BatchNorm2d(tensor_size[1])
elif normalization == 'group':
affine = kwargs['affine'] if 'affine' in kwargs.keys() else False
if 'groups' in kwargs.keys():
return torch.nn.GroupNorm(kwargs['groups'], tensor_size[1],
affine=affine)
else:
possible = [(tensor_size[1] // i) for i in range(tensor_size[1],
0, -1) if tensor_size[1] % i == 0]
groups = possible[len(possible) // 2]
return torch.nn.GroupNorm(groups, tensor_size[1], affine=affine)
elif normalization == 'instance':
affine = kwargs['affine'] if 'affine' in kwargs.keys() else False
return torch.nn.InstanceNorm2d(tensor_size[1], affine=affine)
elif normalization == 'layer':
elementwise_affine = kwargs['elementwise_affine'
] if 'elementwise_affine' in kwargs.keys() else True
return torch.nn.LayerNorm(tensor_size[1:], elementwise_affine=
elementwise_affine)
elif normalization == 'pixelwise':
return PixelWise()
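# Hedged worked example (added for clarity; the helper below is illustrative and
# not part of the original repo). The 'group' branch above picks the middle
# divisor of the channel count when no explicit `groups` is passed.
def _example_center_groups(channels):
    possible = [channels // i for i in range(channels, 0, -1) if channels %
        i == 0]
    return possible[len(possible) // 2]
# _example_center_groups(128) == 16  (divisors [1, 2, 4, 8, 16, 32, 64, 128])
# _example_center_groups(4) == 2     (divisors [1, 2, 4])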
class Activations(nn.Module):
""" All the usual activations along with maxout, relu + maxout and swish.
MaxOut (maxo) - https://arxiv.org/pdf/1302.4389.pdf
Swish - https://arxiv.org/pdf/1710.05941v1.pdf
Args:
activation: relu/relu6/lklu/elu/prelu/tanh/sigm/maxo/rmxo/swish
channels: parameter for prelu, default is 1
"""
def __init__(self, activation='relu', channels=1):
super(Activations, self).__init__()
if activation is not None:
activation = activation.lower()
self.activation = activation
self.function = None
if activation in self.available():
self.function = getattr(self, '_' + activation)
if activation == 'prelu':
self.weight = nn.Parameter(torch.rand(channels))
else:
self.activation = ''
def forward(self, tensor):
if self.function is None:
return tensor
return self.function(tensor)
def _relu(self, tensor):
return F.relu(tensor)
def _relu6(self, tensor):
return F.relu6(tensor)
def _lklu(self, tensor):
return F.leaky_relu(tensor)
def _elu(self, tensor):
return F.elu(tensor)
def _prelu(self, tensor):
return F.prelu(tensor, self.weight)
def _tanh(self, tensor):
return torch.tanh(tensor)
def _sigm(self, tensor):
return torch.sigmoid(tensor)
def _maxo(self, tensor):
assert tensor.size(1) % 2 == 0, 'MaxOut: tensor.size(1) must be even'
return torch.max(*tensor.split(tensor.size(1) // 2, 1))
def _rmxo(self, tensor):
return self._maxo(F.relu(tensor))
def _swish(self, tensor):
return tensor * torch.sigmoid(tensor)
def __repr__(self):
return self.activation
@staticmethod
def available():
return ['relu', 'relu6', 'lklu', 'elu', 'prelu', 'tanh', 'sigm',
'maxo', 'rmxo', 'swish']
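# Hedged usage sketch (illustrative; not part of the original repo): 'maxo'
# splits the channel dimension in half and takes an element-wise max of the two
# halves, so the channel count is halved; 'swish' is x * sigmoid(x).
if __name__ == '__main__':
    print(Activations('maxo')(torch.rand(1, 8, 4, 4)).shape)   # (1, 4, 4, 4)
    print(Activations('swish')(torch.rand(1, 8, 4, 4)).shape)  # (1, 8, 4, 4)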
class PixelWise(torch.nn.Module):
""" Implemented - https://arxiv.org/pdf/1710.10196.pdf """
def __init__(self, eps=1e-08):
super(PixelWise, self).__init__()
self.eps = eps
def forward(self, tensor):
return tensor.div(tensor.pow(2).mean(1, True).add(self.eps).pow(0.5))
def __repr__(self):
return 'pixelwise'
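# Hedged sketch (illustrative; not part of the original repo): PixelWise is the
# pixelwise feature-vector normalization from progressive GANs; after it, the
# RMS over the channel dimension is approximately 1 at every spatial location.
if __name__ == '__main__':
    z = torch.randn(2, 8, 4, 4)
    print(PixelWise()(z).pow(2).mean(1).sqrt())  # ~1.0 everywhere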
class ConvolutionTranspose(nn.Module):
"""
Parameters/Inputs
tensor_size = (None/any integer >0, channels, height, width)
filter_size = list(length=2)/tuple(length=2)/integer
    out_channels = number of channels in the returned tensor (tensor.size(1))
strides = list(length=2)/tuple(length=2)/integer
pad = True/False
activation = "relu"/"relu6"/"lklu"/"tanh"/"sigm"/"maxo"/"rmxo"/"swish"
dropout = 0.-1.
normalization = None/"batch"/"group"/"instance"/"layer"/"pixelwise"
pre_nm = True/False
groups = 1, ... out_channels
weight_nm = True/False -- https://arxiv.org/pdf/1602.07868.pdf
equalized = True/False -- https://arxiv.org/pdf/1710.10196.pdf
"""
def __init__(self, tensor_size, filter_size, out_channels, strides=(1,
1), pad=True, activation='relu', dropout=0.0, normalization=None,
pre_nm=False, groups=1, weight_nm=False, equalized=False, **kwargs):
super(ConvolutionTranspose, self).__init__()
assert len(tensor_size) == 4 and type(tensor_size) in [list, tuple
], 'ConvolutionTranspose -- tensor_size must be of length 4 (tuple or list)'
assert type(filter_size) in [int, list, tuple
], 'Convolution -- filter_size must be int/tuple/list'
if isinstance(filter_size, int):
filter_size = filter_size, filter_size
if isinstance(filter_size, list):
filter_size = tuple(filter_size)
assert len(filter_size
) == 2, 'ConvolutionTranspose -- filter_size length must be 2'
assert type(strides) in [int, list, tuple
], 'ConvolutionTranspose -- strides must be int/tuple/list'
if isinstance(strides, int):
strides = strides, strides
if isinstance(strides, list):
strides = tuple(strides)
assert len(strides
) == 2, 'ConvolutionTranspose -- strides length must be 2'
assert isinstance(pad, bool
), 'ConvolutionTranspose -- pad must be boolean'
assert isinstance(dropout, float
), 'ConvolutionTranspose -- dropout must be float'
assert normalization in [None, 'batch', 'group', 'instance',
'layer', 'pixelwise'
], "Convolution's normalization must be None/batch/group/instance/layer/pixelwise"
assert isinstance(equalized, bool
), 'Convolution -- equalized must be boolean'
self.equalized = equalized
if activation is not None:
activation = activation.lower()
dilation = kwargs['dilation'] if 'dilation' in kwargs.keys() else (1, 1
)
padding = (filter_size[0] // 2, filter_size[1] // 2) if pad else (0, 0)
if dropout > 0.0:
self.dropout = nn.Dropout2d(dropout)
pre_expansion, pst_expansion = 1, 1
if activation in ('maxo', 'rmxo'):
if pre_nm:
pre_expansion = 2
if not pre_nm:
pst_expansion = 2
if pre_nm:
if normalization is not None:
self.Normalization = Normalizations(tensor_size,
normalization, **kwargs)
if activation in ['relu', 'relu6', 'lklu', 'tanh', 'sigm',
'maxo', 'rmxo', 'swish']:
self.Activation = Activations(activation)
if weight_nm:
self.ConvolutionTranspose = nn.utils.weight_norm(nn.
ConvTranspose2d(tensor_size[1] // pre_expansion,
out_channels * pst_expansion, filter_size, strides, padding,
bias=False, dilation=dilation, groups=groups), name='weight')
else:
self.ConvolutionTranspose = nn.ConvTranspose2d(tensor_size[1] //
pre_expansion, out_channels * pst_expansion, filter_size,
strides, padding, bias=False, groups=groups)
nn.init.kaiming_normal_(self.ConvolutionTranspose.weight, nn.
init.calculate_gain('conv2d'))
if equalized:
import numpy as np
gain = kwargs['gain'] if 'gain' in kwargs.keys() else np.sqrt(2
)
fan_in = tensor_size[1] * out_channels * filter_size[0]
self.scale = gain / np.sqrt(fan_in)
self.ConvolutionTranspose.weight.data.mul_(self.scale)
self.oc = self.ConvolutionTranspose.weight.data.size(0)
self.tensor_size = tensor_size[0], out_channels, (tensor_size[2] - 1
) * strides[0] - 2 * padding[0] + filter_size[0], (tensor_size[
3] - 1) * strides[1] - 2 * padding[1] + filter_size[1]
if not pre_nm:
if normalization is not None:
self.Normalization = Normalizations((self.tensor_size[0],
out_channels * pst_expansion, self.tensor_size[2], self
.tensor_size[3]), normalization, **kwargs)
if activation in ['relu', 'relu6', 'lklu', 'tanh', 'sigm',
'maxo', 'rmxo', 'swish']:
self.Activation = Activations(activation)
self.pre_nm = pre_nm
def forward(self, tensor, output_size=None):
if hasattr(self, 'dropout'):
tensor = self.dropout(tensor)
if self.pre_nm:
if hasattr(self, 'Normalization'):
tensor = self.Normalization(tensor)
if hasattr(self, 'Activation'):
tensor = self.Activation(tensor)
if output_size is None:
output_size = self.tensor_size
output_size = tensor.size(0), self.oc, output_size[2], output_size[
3]
tensor = self.ConvolutionTranspose(tensor, output_size=output_size)
if self.equalized:
tensor = tensor.mul(self.scale)
else:
if output_size is None:
output_size = self.tensor_size
output_size = tensor.size(0), self.oc, output_size[2], output_size[
3]
tensor = self.ConvolutionTranspose(tensor, output_size=output_size)
if self.equalized:
tensor = tensor.mul(self.scale)
if hasattr(self, 'Normalization'):
tensor = self.Normalization(tensor)
if hasattr(self, 'Activation'):
tensor = self.Activation(tensor)
return tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'tensor_size': [4, 4, 4, 4], 'filter_size': 4,
'out_channels': 4}]
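# Hedged worked example (illustrative; not part of the original repo). With the
# init inputs above (tensor_size=[4, 4, 4, 4], filter_size=4, strides=(1, 1),
# pad=True -> padding=(2, 2)), the precomputed output shape follows the usual
# transposed-convolution formula
#     H_out = (H_in - 1) * stride - 2 * padding + kernel = (4 - 1) * 1 - 4 + 4 = 3,
# i.e. tensor_size == (4, 4, 3, 3), matching the (4, 4, 3, 3) assert on the
# convolution result in the compiled Triton wrapper of this entry.
if __name__ == '__main__':
    m = ConvolutionTranspose([4, 4, 4, 4], 4, 4)
    print(m.tensor_size)                    # (4, 4, 3, 3)
    print(m(torch.rand(4, 4, 4, 4)).shape)  # torch.Size([4, 4, 3, 3])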
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(144)](buf1, buf2,
144, XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_1, primals_2, buf2
def Normalizations(tensor_size=None, normalization=None, available=False,
**kwargs):
"""Does normalization on 4D tensor.
Args:
tensor_size: shape of tensor in BCHW
(None/any integer >0, channels, height, width)
normalization: None/batch/group/instance/layer/pixelwise
available: if True, returns all available normalization methods
        groups: for group (GroupNorm); when not provided, groups is set to the
            middle divisor of the channel count - e.g. for tensor_size[1] = 128
            groups is 16, since the possible values are [1, 2, 4, 8, 16, 32, 64, 128]
affine: for group and instance normalization, default False
elementwise_affine: for layer normalization. default True
"""
list_available = ['batch', 'group', 'instance', 'layer', 'pixelwise']
if available:
return list_available
normalization = normalization.lower()
assert normalization in list_available, 'Normalization must be None/' + '/'.join(
list_available)
if normalization == 'batch':
return torch.nn.BatchNorm2d(tensor_size[1])
elif normalization == 'group':
affine = kwargs['affine'] if 'affine' in kwargs.keys() else False
if 'groups' in kwargs.keys():
return torch.nn.GroupNorm(kwargs['groups'], tensor_size[1],
affine=affine)
else:
possible = [(tensor_size[1] // i) for i in range(tensor_size[1],
0, -1) if tensor_size[1] % i == 0]
groups = possible[len(possible) // 2]
return torch.nn.GroupNorm(groups, tensor_size[1], affine=affine)
elif normalization == 'instance':
affine = kwargs['affine'] if 'affine' in kwargs.keys() else False
return torch.nn.InstanceNorm2d(tensor_size[1], affine=affine)
elif normalization == 'layer':
elementwise_affine = kwargs['elementwise_affine'
] if 'elementwise_affine' in kwargs.keys() else True
return torch.nn.LayerNorm(tensor_size[1:], elementwise_affine=
elementwise_affine)
elif normalization == 'pixelwise':
return PixelWise()
class Activations(nn.Module):
""" All the usual activations along with maxout, relu + maxout and swish.
MaxOut (maxo) - https://arxiv.org/pdf/1302.4389.pdf
Swish - https://arxiv.org/pdf/1710.05941v1.pdf
Args:
activation: relu/relu6/lklu/elu/prelu/tanh/sigm/maxo/rmxo/swish
channels: parameter for prelu, default is 1
"""
def __init__(self, activation='relu', channels=1):
super(Activations, self).__init__()
if activation is not None:
activation = activation.lower()
self.activation = activation
self.function = None
if activation in self.available():
self.function = getattr(self, '_' + activation)
if activation == 'prelu':
self.weight = nn.Parameter(torch.rand(channels))
else:
self.activation = ''
def forward(self, tensor):
if self.function is None:
return tensor
return self.function(tensor)
def _relu(self, tensor):
return F.relu(tensor)
def _relu6(self, tensor):
return F.relu6(tensor)
def _lklu(self, tensor):
return F.leaky_relu(tensor)
def _elu(self, tensor):
return F.elu(tensor)
def _prelu(self, tensor):
return F.prelu(tensor, self.weight)
def _tanh(self, tensor):
return torch.tanh(tensor)
def _sigm(self, tensor):
return torch.sigmoid(tensor)
def _maxo(self, tensor):
assert tensor.size(1) % 2 == 0, 'MaxOut: tensor.size(1) must be even'
return torch.max(*tensor.split(tensor.size(1) // 2, 1))
def _rmxo(self, tensor):
return self._maxo(F.relu(tensor))
def _swish(self, tensor):
return tensor * torch.sigmoid(tensor)
def __repr__(self):
return self.activation
@staticmethod
def available():
return ['relu', 'relu6', 'lklu', 'elu', 'prelu', 'tanh', 'sigm',
'maxo', 'rmxo', 'swish']
class PixelWise(torch.nn.Module):
""" Implemented - https://arxiv.org/pdf/1710.10196.pdf """
def __init__(self, eps=1e-08):
super(PixelWise, self).__init__()
self.eps = eps
def forward(self, tensor):
return tensor.div(tensor.pow(2).mean(1, True).add(self.eps).pow(0.5))
def __repr__(self):
return 'pixelwise'
class ConvolutionTransposeNew(nn.Module):
"""
Parameters/Inputs
tensor_size = (None/any integer >0, channels, height, width)
filter_size = list(length=2)/tuple(length=2)/integer
    out_channels = number of channels in the returned tensor (tensor.size(1))
strides = list(length=2)/tuple(length=2)/integer
pad = True/False
activation = "relu"/"relu6"/"lklu"/"tanh"/"sigm"/"maxo"/"rmxo"/"swish"
dropout = 0.-1.
normalization = None/"batch"/"group"/"instance"/"layer"/"pixelwise"
pre_nm = True/False
groups = 1, ... out_channels
weight_nm = True/False -- https://arxiv.org/pdf/1602.07868.pdf
equalized = True/False -- https://arxiv.org/pdf/1710.10196.pdf
"""
def __init__(self, tensor_size, filter_size, out_channels, strides=(1,
1), pad=True, activation='relu', dropout=0.0, normalization=None,
pre_nm=False, groups=1, weight_nm=False, equalized=False, **kwargs):
super(ConvolutionTransposeNew, self).__init__()
assert len(tensor_size) == 4 and type(tensor_size) in [list, tuple
], 'ConvolutionTranspose -- tensor_size must be of length 4 (tuple or list)'
assert type(filter_size) in [int, list, tuple
], 'Convolution -- filter_size must be int/tuple/list'
if isinstance(filter_size, int):
filter_size = filter_size, filter_size
if isinstance(filter_size, list):
filter_size = tuple(filter_size)
assert len(filter_size
) == 2, 'ConvolutionTranspose -- filter_size length must be 2'
assert type(strides) in [int, list, tuple
], 'ConvolutionTranspose -- strides must be int/tuple/list'
if isinstance(strides, int):
strides = strides, strides
if isinstance(strides, list):
strides = tuple(strides)
assert len(strides
) == 2, 'ConvolutionTranspose -- strides length must be 2'
assert isinstance(pad, bool
), 'ConvolutionTranspose -- pad must be boolean'
assert isinstance(dropout, float
), 'ConvolutionTranspose -- dropout must be float'
assert normalization in [None, 'batch', 'group', 'instance',
'layer', 'pixelwise'
], "Convolution's normalization must be None/batch/group/instance/layer/pixelwise"
assert isinstance(equalized, bool
), 'Convolution -- equalized must be boolean'
self.equalized = equalized
if activation is not None:
activation = activation.lower()
dilation = kwargs['dilation'] if 'dilation' in kwargs.keys() else (1, 1
)
padding = (filter_size[0] // 2, filter_size[1] // 2) if pad else (0, 0)
if dropout > 0.0:
self.dropout = nn.Dropout2d(dropout)
pre_expansion, pst_expansion = 1, 1
if activation in ('maxo', 'rmxo'):
if pre_nm:
pre_expansion = 2
if not pre_nm:
pst_expansion = 2
if pre_nm:
if normalization is not None:
self.Normalization = Normalizations(tensor_size,
normalization, **kwargs)
if activation in ['relu', 'relu6', 'lklu', 'tanh', 'sigm',
'maxo', 'rmxo', 'swish']:
self.Activation = Activations(activation)
if weight_nm:
self.ConvolutionTranspose = nn.utils.weight_norm(nn.
ConvTranspose2d(tensor_size[1] // pre_expansion,
out_channels * pst_expansion, filter_size, strides, padding,
bias=False, dilation=dilation, groups=groups), name='weight')
else:
self.ConvolutionTranspose = nn.ConvTranspose2d(tensor_size[1] //
pre_expansion, out_channels * pst_expansion, filter_size,
strides, padding, bias=False, groups=groups)
nn.init.kaiming_normal_(self.ConvolutionTranspose.weight, nn.
init.calculate_gain('conv2d'))
if equalized:
import numpy as np
gain = kwargs['gain'] if 'gain' in kwargs.keys() else np.sqrt(2
)
fan_in = tensor_size[1] * out_channels * filter_size[0]
self.scale = gain / np.sqrt(fan_in)
self.ConvolutionTranspose.weight.data.mul_(self.scale)
self.oc = self.ConvolutionTranspose.weight.data.size(0)
self.tensor_size = tensor_size[0], out_channels, (tensor_size[2] - 1
) * strides[0] - 2 * padding[0] + filter_size[0], (tensor_size[
3] - 1) * strides[1] - 2 * padding[1] + filter_size[1]
if not pre_nm:
if normalization is not None:
self.Normalization = Normalizations((self.tensor_size[0],
out_channels * pst_expansion, self.tensor_size[2], self
.tensor_size[3]), normalization, **kwargs)
if activation in ['relu', 'relu6', 'lklu', 'tanh', 'sigm',
'maxo', 'rmxo', 'swish']:
self.Activation = Activations(activation)
self.pre_nm = pre_nm
def forward(self, input_0):
primals_1 = self.ConvolutionTranspose.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| davidwagnerkc/TensorMONK | ConvolutionTranspose | false | 1,809 | [
"MIT"
] | 0 | 3607836d3d6bfd0994e044536b2a51bc84b35f31 | https://github.com/davidwagnerkc/TensorMONK/tree/3607836d3d6bfd0994e044536b2a51bc84b35f31 | import torch
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.init
import torch.nn as nn
import torch.nn.functional as F
def Normalizations(tensor_size=None, normalization=None, available=False,
**kwargs):
"""Does normalization on 4D tensor.
Args:
tensor_size: shape of tensor in BCHW
(None/any integer >0, channels, height, width)
normalization: None/batch/group/instance/layer/pixelwise
available: if True, returns all available normalization methods
groups: for group (GroupNorm), when not provided groups is the center
value of all possible - ex: for a tensor_size[1] = 128, groups is
set to 16 as the possible groups are [1, 2, 4, 8, 16, 32, 64, 128]
affine: for group and instance normalization, default False
elementwise_affine: for layer normalization. default True
"""
list_available = ['batch', 'group', 'instance', 'layer', 'pixelwise']
if available:
return list_available
normalization = normalization.lower()
assert normalization in list_available, 'Normalization must be None/' + '/'.join(
list_available)
if normalization == 'batch':
return torch.nn.BatchNorm2d(tensor_size[1])
elif normalization == 'group':
affine = kwargs['affine'] if 'affine' in kwargs.keys() else False
if 'groups' in kwargs.keys():
return torch.nn.GroupNorm(kwargs['groups'], tensor_size[1],
affine=affine)
else:
possible = [(tensor_size[1] // i) for i in range(tensor_size[1],
0, -1) if tensor_size[1] % i == 0]
groups = possible[len(possible) // 2]
return torch.nn.GroupNorm(groups, tensor_size[1], affine=affine)
elif normalization == 'instance':
affine = kwargs['affine'] if 'affine' in kwargs.keys() else False
return torch.nn.InstanceNorm2d(tensor_size[1], affine=affine)
elif normalization == 'layer':
elementwise_affine = kwargs['elementwise_affine'
] if 'elementwise_affine' in kwargs.keys() else True
return torch.nn.LayerNorm(tensor_size[1:], elementwise_affine=
elementwise_affine)
elif normalization == 'pixelwise':
return PixelWise()
class Activations(nn.Module):
""" All the usual activations along with maxout, relu + maxout and swish.
MaxOut (maxo) - https://arxiv.org/pdf/1302.4389.pdf
Swish - https://arxiv.org/pdf/1710.05941v1.pdf
Args:
activation: relu/relu6/lklu/elu/prelu/tanh/sigm/maxo/rmxo/swish
channels: parameter for prelu, default is 1
"""
def __init__(self, activation='relu', channels=1):
super().__init__()
if activation is not None:
activation = activation.lower()
self.activation = activation
self.function = None
if activation in self.available():
self.function = getattr(self, '_' + activation)
if activation == 'prelu':
self.weight = nn.Parameter(torch.rand(channels))
else:
self.activation = ''
def forward(self, tensor):
if self.function is None:
return tensor
return self.function(tensor)
def _relu(self, tensor):
return F.relu(tensor)
def _relu6(self, tensor):
return F.relu6(tensor)
def _lklu(self, tensor):
return F.leaky_relu(tensor)
def _elu(self, tensor):
return F.elu(tensor)
def _prelu(self, tensor):
return F.prelu(tensor, self.weight)
def _tanh(self, tensor):
return torch.tanh(tensor)
def _sigm(self, tensor):
return torch.sigmoid(tensor)
def _maxo(self, tensor):
assert tensor.size(1) % 2 == 0, 'MaxOut: tensor.size(1) must be even'
return torch.max(*tensor.split(tensor.size(1) // 2, 1))
def _rmxo(self, tensor):
return self._maxo(F.relu(tensor))
def _swish(self, tensor):
r
# ... truncated (>4000 chars) for memory efficiency |
Conv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jw/cjw2limywkgcxcmakwde6yhejpgbx3f2uhb57pqab2yabyayyfay.py
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# input_1 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_3, [0, 1, 0, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 5) % 5
x0 = xindex % 5
x2 = (xindex // 25)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_3, buf0, 400, grid=grid(400), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import math
import torch
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1,
    dilation=1, groups=1):
    """TensorFlow-style 'same' convolution: pad so that the output spatial
    size is ceil(input_size / stride). The column padding is derived from the
    row dimension, so square inputs and square kernels are assumed."""
    input_rows = input.size(2)
    filter_rows = weight.size(2)
    effective_filter_rows = (filter_rows - 1) * dilation[0] + 1
    out_rows = (input_rows + stride[0] - 1) // stride[0]
    padding_rows = max(0, (out_rows - 1) * stride[0] +
        effective_filter_rows - input_rows)
    rows_odd = padding_rows % 2 != 0
    padding_cols = max(0, (out_rows - 1) * stride[0] +
        effective_filter_rows - input_rows)
    cols_odd = padding_cols % 2 != 0
    if rows_odd or cols_odd:
        input = pad(input, [0, int(cols_odd), 0, int(rows_odd)])
    return F.conv2d(input, weight, bias, stride, padding=(padding_rows // 2,
        padding_cols // 2), dilation=dilation, groups=groups)
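# Hedged worked example (illustrative; not part of the original repo). For the
# 4x4 inputs of this entry with a 4x4 kernel and stride (1, 1): out_rows = 4 and
# padding_rows = max(0, (4 - 1) * 1 + (4 - 1) * 1 + 1 - 4) = 3, which is odd, so
# one extra row and column are padded on the bottom/right (4x4 -> 5x5) before
# F.conv2d runs with padding=(1, 1). This reproduces the constant_pad_nd to
# (4, 4, 5, 5) followed by a padding=(1, 1) convolution in the compiled code.
if __name__ == '__main__':
    x = torch.rand(4, 4, 4, 4)
    w = torch.rand(4, 4, 4, 4)
    y = conv2d_same_padding(x, w, stride=(1, 1), dilation=(1, 1))
    print(y.shape)  # torch.Size([4, 4, 4, 4])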
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias,
weight_=None):
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(in_channels, out_channels //
groups, *kernel_size))
elif weight_ is None:
self.weight = Parameter(torch.Tensor(out_channels, in_channels //
groups, *kernel_size))
elif weight_ is not None and isinstance(weight_, torch.Tensor):
self.weight = Parameter(weight_, requires_grad=False)
else:
            raise ValueError('Unsupported type!')
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = (
'{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
class Conv2d(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, weight_=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2d, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, False, _pair(0), groups, bias,
weight_=weight_)
def forward(self, input):
        return conv2d_same_padding(input, self.weight, self.bias,
            self.stride, self.padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
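# A minimal sketch (illustrative helper, not part of the original module)
# combining the helpers above: build the layer from get_init_inputs() and run
# it on get_inputs(); with the "same"-style padding the 4x4x4x4 input keeps
# its spatial size.
def _conv2d_demo():
    init_args, init_kwargs = get_init_inputs()
    layer = Conv2d(*init_args, **init_kwargs)
    out = layer(*get_inputs())
    return out.shape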
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
import math
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
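# The first kernel below zero-pads each 4x4 plane of the input into a 5x5
# buffer (the constant_pad_nd traced from the pad() call in
# conv2d_same_padding); the second adds the bias to the convolution output in
# place.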
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = x0
tmp4 = tmp3 < tmp1
tmp5 = tmp2 & tmp4
tmp6 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp5 & xmask, other=0.0)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(400)](primals_3, buf0, 400,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
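        # buf2 now holds conv(padded input, weight) with the bias added in
        # place; the weight (primals_1) and the padded input (buf0) are
        # returned alongside it.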
return buf2, primals_1, buf0
def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1,
dilation=1, groups=1):
    # TensorFlow-style "SAME" padding: size the row and column padding so the
    # output spatial dims equal ceil(input / stride), padding the input by any
    # odd remainder before calling F.conv2d. The padding argument is ignored;
    # stride and dilation are expected to be pairs (Conv2d passes _pair-ed
    # values).
    input_rows = input.size(2)
    filter_rows = weight.size(2)
    out_rows = (input_rows + stride[0] - 1) // stride[0]
    padding_rows = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) *
        dilation[0] + 1 - input_rows)
    rows_odd = padding_rows % 2 != 0
    input_cols = input.size(3)
    filter_cols = weight.size(3)
    out_cols = (input_cols + stride[1] - 1) // stride[1]
    padding_cols = max(0, (out_cols - 1) * stride[1] + (filter_cols - 1) *
        dilation[1] + 1 - input_cols)
    cols_odd = padding_cols % 2 != 0
    if rows_odd or cols_odd:
        input = pad(input, [0, int(cols_odd), 0, int(rows_odd)])
    return F.conv2d(input, weight, bias, stride, padding=(padding_rows // 2,
        padding_cols // 2), dilation=dilation, groups=groups)
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias,
weight_=None):
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(in_channels, out_channels //
groups, *kernel_size))
elif weight_ is None:
self.weight = Parameter(torch.Tensor(out_channels, in_channels //
groups, *kernel_size))
elif weight_ is not None and isinstance(weight_, torch.Tensor):
self.weight = Parameter(weight_, requires_grad=False)
else:
            raise ValueError('Unsupported type!')
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = (
'{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
class Conv2dNew(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, weight_=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2dNew, self).__init__(in_channels, out_channels,
kernel_size, stride, padding, dilation, False, _pair(0), groups,
bias, weight_=weight_)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ddayzzz/mmdetection | Conv2d | false | 1,810 | [
"Apache-2.0"
] | 0 | b9940c56cc19b3dda7468a5fd858b9683e93a486 | https://github.com/ddayzzz/mmdetection/tree/b9940c56cc19b3dda7468a5fd858b9683e93a486 | from torch.nn import Module
import math
import torch
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
import torch.utils.data
from torch.nn.parameter import Parameter
from torch.nn.functional import pad
from torch.nn.modules import Module
def conv2d_same_padding(input, weight, bias=None, stride=1, padding=1,
dilation=1, groups=1):
    # TensorFlow-style "SAME" padding: size the row and column padding so the
    # output spatial dims equal ceil(input / stride), padding the input by any
    # odd remainder before calling F.conv2d. The padding argument is ignored;
    # stride and dilation are expected to be pairs (the Model passes _pair-ed
    # values).
    input_rows = input.size(2)
    filter_rows = weight.size(2)
    out_rows = (input_rows + stride[0] - 1) // stride[0]
    padding_rows = max(0, (out_rows - 1) * stride[0] + (filter_rows - 1) *
        dilation[0] + 1 - input_rows)
    rows_odd = padding_rows % 2 != 0
    input_cols = input.size(3)
    filter_cols = weight.size(3)
    out_cols = (input_cols + stride[1] - 1) // stride[1]
    padding_cols = max(0, (out_cols - 1) * stride[1] + (filter_cols - 1) *
        dilation[1] + 1 - input_cols)
    cols_odd = padding_cols % 2 != 0
    if rows_odd or cols_odd:
        input = pad(input, [0, int(cols_odd), 0, int(rows_odd)])
    return F.conv2d(input, weight, bias, stride, padding=(padding_rows // 2,
        padding_cols // 2), dilation=dilation, groups=groups)
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias,
weight_=None):
super().__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(in_channels, out_channels //
groups, *kernel_size))
elif weight_ is None:
self.weight = Parameter(torch.Tensor(out_channels, in_channels //
groups, *kernel_size))
elif weight_ is not None and isinstance(weight_, torch.Tensor):
self.weight = Parameter(weight_, requires_grad=False)
else:
            raise ValueError('Unsupported type!')
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = (
'{name}({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
)
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
s += ')'
return s.format(name=self.__class__.__name__, **self.__dict__)
class Model(_ConvNd):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, weight_=None):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super().__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, False, _pair(0), groups, bias,
weight_=weight_)
def forward(self, input):
return conv2d_same_padding(input, self.weight, self.bias, self.
# ... truncated (>4000 chars) for memory efficiency |
GeneralizedDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/d5/cd52mlrkv7fg7ptxw5vaz664ocriv2bgu4unk4oe24v6nbgmv7uv.py
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# ground_o => sum_2
# intersection => sum_1
# mul => mul
# pred_o => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [2, 3]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
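# Reference sketch in eager PyTorch (illustrative helper, not used by the
# compiled graph): the fused kernel above reduces each (batch, class) plane,
# producing the three per-class sums consumed by the generalized Dice formula.
def _reference_reductions(ground, pred):
    intersection = (ground * pred).sum(dim=[2, 3])
    ground_o = ground.sum(dim=[2, 3])
    pred_o = pred.sum(dim=[2, 3])
    return intersection, ground_o, pred_o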
# kernel path: runs/run_shard_6/inductor_cache/6l/c6l2gkrph5u6qhzfpwcqqfu277drwwd7ra3dyhys4nf64xwbbpre.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default, index_put
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put.default](args = (%select, [%isinf], %full_default), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = libdevice.isinf(tmp3).to(tl.int1)
tmp5 = 0.0
tmp6 = tl.where(tmp4, tmp5, tmp3)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/cy/ccy7idfxu272yvofjbbp54inr6a5zn7jkvgbq7732tfnhhq2lmr7.py
# Topologically Sorted Source Nodes: [mul_1, w], Original ATen: [aten.mul, aten.reciprocal]
# Source node to ATen node mapping:
# mul_1 => mul_1
# w => reciprocal
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, %sum_2), kwargs = {})
# %reciprocal : [num_users=2] = call_function[target=torch.ops.aten.reciprocal.default](args = (%mul_1,), kwargs = {})
# %select_scatter_default : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%reciprocal, %index_put, 0, 0), kwargs = {})
triton_poi_fused_mul_reciprocal_2 = async_compile.triton('triton_poi_fused_mul_reciprocal_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_reciprocal_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_reciprocal_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp4 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = tl.where(tmp2, tmp3, tmp7)
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
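# Reference sketch in eager PyTorch (illustrative helper, not used by the
# compiled graph) of the class weights that the surrounding index_put/max
# kernels build row by row: w = 1 / ground_o**2, with infinite entries
# (classes absent from the ground truth) first zeroed and then set to the
# row maximum.
def _reference_weights(ground_o):
    w = torch.reciprocal(ground_o * ground_o)
    for row in w:
        infs = torch.isinf(row)
        row[infs] = 0.0
        row[infs] = torch.max(row)
    return w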
# kernel path: runs/run_shard_6/inductor_cache/jk/cjkuxs5m7ejvswg7hrk2llstygqftgn2i6wdk7h25jxqaj665z6b.py
# Topologically Sorted Source Nodes: [max_1, setitem_1], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_1 => max_1
# setitem_1 => index_put_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_4,), kwargs = {})
# %index_put_1 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_4, [%isinf], %max_1), kwargs = {})
triton_per_fused_index_put_max_3 = async_compile.triton('triton_per_fused_index_put_max_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_3', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_3(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp0 = tl.full([1, 1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp4 = tmp3 * tmp3
tmp5 = tl.full([1, 1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tl.where(tmp1, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = triton_helpers.max2(tmp8, 1)[:, None]
tmp11 = libdevice.isinf(tmp6).to(tl.int1)
tmp12 = tl.where(tmp11, tmp10, tmp7)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4q/c4ql4xyg2xlaa3uezk7gjgywmt3fw3tynazq4ckpuwcsgzw6zhei.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %index_put_1, 0, 0), kwargs = {})
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/o5/co5ocweef7wvpqwak7rmrihdxwrqz6mechmdzkcte5vjlsv5tdhx.py
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_2 => full_default_1, index_put_2
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_2 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_6, [%isinf_1], %full_default_1), kwargs = {})
triton_poi_fused_index_put_lift_fresh_5 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_5', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_5(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (4 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yv/cyvyap3cqstn6kbri4i6qtdbvfru2ywjrkts4h2byghwjatyop4k.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_2 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %index_put_2, 0, 1), kwargs = {})
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tg/ctgfsxxk2urovszhep4mbnqcv7roo2a5o633p6v6krrzhegbz6tr.py
# Topologically Sorted Source Nodes: [max_2, setitem_3], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_2 => max_2
# setitem_3 => index_put_3
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_7,), kwargs = {})
# %index_put_3 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_7, [%isinf_1], %max_2), kwargs = {})
triton_per_fused_index_put_max_7 = async_compile.triton('triton_per_fused_index_put_max_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_7', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_7(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(4 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/fy/cfy4vgr7piqugeft2crnhfbw5ielxsgean22zjdvpklq4txeer5o.py
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_4 => full_default_2, index_put_4
# Graph fragment:
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_4 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_9, [%isinf_2], %full_default_2), kwargs = {})
triton_poi_fused_index_put_lift_fresh_8 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_8', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_8(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (8 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/zj/czjcmriyujs5brmyh7stjrkfk37e4twzbljor3wofssbrxsuw63t.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %index_put_4, 0, 2), kwargs = {})
triton_poi_fused_9 = async_compile.triton('triton_poi_fused_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hx/chxbr5aueebc6hjsn4ej4rftudtvp5pbxyu5cbojq5zy2lksc2pa.py
# Topologically Sorted Source Nodes: [max_3, setitem_5], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_3 => max_3
# setitem_5 => index_put_5
# Graph fragment:
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_10,), kwargs = {})
# %index_put_5 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_10, [%isinf_2], %max_3), kwargs = {})
triton_per_fused_index_put_max_10 = async_compile.triton('triton_per_fused_index_put_max_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_10', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_10(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (4 + r0), None)
tmp10 = tl.load(in_ptr1 + (8 + r0), None)
tmp0 = tl.full([1, 1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(8 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wq/cwqjjijigoiri7ebhb452bt4tizigzg2o6ji6lsekghkviga7xp3.py
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_6 => full_default_3, index_put_6
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_6 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_12, [%isinf_3], %full_default_3), kwargs = {})
triton_poi_fused_index_put_lift_fresh_11 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_11', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_11(in_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (12 + x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ux/cuxhakfkc47pmz4gov3quzmpxdbvtixikhnf3nicsxfr6rvsdnli.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %select_scatter_default_6 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_5, %index_put_6, 0, 3), kwargs = {})
triton_poi_fused_12 = async_compile.triton('triton_poi_fused_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/lf/clf4pxfzowgmhow2bm4nhu2drefrnqqu6uqv5egtoum633aeciwm.py
# Topologically Sorted Source Nodes: [max_4, setitem_7], Original ATen: [aten.max, aten.index_put]
# Source node to ATen node mapping:
# max_4 => max_4
# setitem_7 => index_put_7
# Graph fragment:
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.default](args = (%select_13,), kwargs = {})
# %index_put_7 : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%select_13, [%isinf_3], %max_4), kwargs = {})
triton_per_fused_index_put_max_13 = async_compile.triton('triton_per_fused_index_put_max_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_max_13', 'mutated_arg_names': ['out_ptr2'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_max_13(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + (12 + r0), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 2, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + (tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/s2/cs2wyfvtdb7vmxx7u5otf7upxsggzrwonnusuhl3h5txytfu73v3.py
# Topologically Sorted Source Nodes: [mul_2, sum_4, mul_3, add_1, denominator, mul_4, sum_5, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# denominator => add
# f => sub
# f_1 => mean
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# sum_4 => sum_4
# sum_5 => sum_5
# truediv => div
# Graph fragment:
# %select_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_6, %index_put_7, 0, 3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %select_scatter_default_7), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_4, 2.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, 1e-05), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %select_scatter_default_7), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_14 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (12))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (13))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (14))
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (15))
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + (4*r0), None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 3, tl.int32)
tmp3 = tmp1 == tmp2
tmp7 = tl.where(tmp3, tmp5, tmp6)
tmp8 = tmp0 * tmp7
tmp13 = tl.where(tmp3, tmp11, tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp8 + tmp14
tmp20 = tl.where(tmp3, tmp18, tmp19)
tmp21 = tmp16 * tmp20
tmp22 = tmp15 + tmp21
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tmp23 * tmp27
tmp29 = tmp22 + tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp7
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp13
tmp38 = tmp33 + tmp37
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp20
tmp43 = tmp38 + tmp42
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp27
tmp48 = tmp43 + tmp47
tmp49 = 2.0
tmp50 = tmp29 * tmp49
tmp51 = 1e-05
tmp52 = tmp50 + tmp51
tmp53 = tmp48 + tmp51
tmp54 = tmp52 / tmp53
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp61, None)
''', device_str='cuda')
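# Reference sketch in eager PyTorch (illustrative helper, not used by the
# compiled graph) of the reduction fused above: the per-batch generalized Dice
# loss 1 - (2 * sum_c(w * intersection) + eps) / (sum_c(w * (ground_o + pred_o)) + eps),
# averaged over the batch.
def _reference_generalized_dice(intersection, ground_o, pred_o, w, eps=1e-05):
    numerator = 2.0 * (intersection * w).sum(dim=1) + eps
    denominator = ((ground_o + pred_o) * w).sum(dim=1) + eps
    return torch.mean(1.0 - numerator / denominator)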
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, intersection, ground_o, pred_o], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf29, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(buf1, buf2, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, w], Original ATen: [aten.mul, aten.reciprocal]
triton_poi_fused_mul_reciprocal_2.run(buf2, buf1, buf4, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_1, setitem_1], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_3.run(buf2, buf1, buf4, 1, 4, grid=grid(1), stream=stream0)
del buf2
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(buf4, buf7, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_2], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_5.run(buf4, buf7, 4, grid=grid(4), stream=stream0)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(buf7, buf11, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_2, setitem_3], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_7.run(buf7, buf4, buf11, 1, 4, grid=grid(1), stream=stream0)
buf14 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(buf11, buf14, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_4], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_8.run(buf11, buf14, 4, grid=grid(4), stream=stream0)
buf18 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf14, buf18, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_3, setitem_5], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_10.run(buf14, buf11, buf18, 1, 4, grid=grid(1), stream=stream0)
buf21 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_9.run(buf18, buf21, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_6], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_11.run(buf18, buf21, 4, grid=grid(4), stream=stream0)
buf25 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_12.run(buf21, buf25, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [max_4, setitem_7], Original ATen: [aten.max, aten.index_put]
triton_per_fused_index_put_max_13.run(buf21, buf18, buf25, 1, 4, grid=grid(1), stream=stream0)
del buf18
del buf21
buf31 = empty_strided_cuda((), (), torch.float32)
buf32 = buf31; del buf31 # reuse
# Topologically Sorted Source Nodes: [mul_2, sum_4, mul_3, add_1, denominator, mul_4, sum_5, add_2, truediv, f, f_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub, aten.mean]
triton_per_fused_add_div_mean_mul_rsub_sum_14.run(buf32, buf0, buf25, buf1, buf29, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf25
del buf29
return (buf32, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import collections
import torch
import warnings
from typing import Optional
from typing import Union
from typing import Any
from typing import Callable
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
Returns a copy of `tup` with `dim` values by either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
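# Illustrative sketch (not part of the original module): a few quick checks of the
# tuple helpers above; the expected values follow directly from the pad-then-slice
# logic in ensure_tuple_size.
def _tuple_helpers_example():
    assert ensure_tuple(4) == (4,)
    assert ensure_tuple_size((4, 4), 4, pad_val=1) == (4, 4, 1, 1)
    assert ensure_tuple_size((4, 4, 4), 2) == (4, 4)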
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndim < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
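# Illustrative sketch (not part of the original module): one_hot on a tiny B1HW
# label map, matching the behaviour described in its docstring.
def _one_hot_example():
    labels = torch.tensor([[[[0, 1], [2, 1]]]])  # shape (1, 1, 2, 2)
    encoded = one_hot(labels, num_classes=3)     # shape (1, 3, 2, 2)
    # encoded[0, v, h, w] == 1 exactly where labels[0, 0, h, w] == v
    return encoded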
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class Weight(Enum):
"""
See also: :py:class:`monai.losses.dice.GeneralizedDiceLoss`
"""
SQUARE = 'square'
SIMPLE = 'simple'
UNIFORM = 'uniform'
class GeneralizedDiceLoss(_Loss):
"""
Compute the generalised Dice loss defined in:
    Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, w_type: 'Union[Weight, str]'=Weight.
SQUARE, reduction: 'Union[LossReduction, str]'=LossReduction.MEAN,
smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch: 'bool'=False
) ->None:
"""
Args:
include_background: If False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, supply another callable to apply
                a different activation layer. Defaults to ``None``; for example:
                `other_act = torch.tanh`.
w_type: {``"square"``, ``"simple"``, ``"uniform"``}
Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, intersection over union is computed from each item in the batch.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
w_type = Weight(w_type)
self.w_func: 'Callable' = torch.ones_like
if w_type == Weight.SIMPLE:
self.w_func = torch.reciprocal
elif w_type == Weight.SQUARE:
self.w_func = lambda x: torch.reciprocal(x * x)
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor'
) ->torch.Tensor:
"""
Args:
input: the shape should be BNH[WD].
target: the shape should be BNH[WD].
Raises:
ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"].
"""
if self.sigmoid:
input = torch.sigmoid(input)
n_pred_ch = input.shape[1]
if self.softmax:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `softmax=True` ignored.')
else:
input = torch.softmax(input, 1)
if self.other_act is not None:
input = self.other_act(input)
if self.to_onehot_y:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `to_onehot_y=True` ignored.')
else:
target = one_hot(target, num_classes=n_pred_ch)
if not self.include_background:
if n_pred_ch == 1:
warnings.warn(
'single channel prediction, `include_background=False` ignored.'
)
else:
target = target[:, 1:]
input = input[:, 1:]
assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})'
reduce_axis = list(range(2, len(input.shape)))
if self.batch:
reduce_axis = [0] + reduce_axis
intersection = torch.sum(target * input, reduce_axis)
ground_o = torch.sum(target, reduce_axis)
pred_o = torch.sum(input, reduce_axis)
denominator = ground_o + pred_o
w = self.w_func(ground_o.float())
        for b in w:
            infs = torch.isinf(b)
            b[infs] = 0.0  # zero out infinite weights first so torch.max below ignores them
            b[infs] = torch.max(b)  # then replace them with the largest finite weight in this row
f: 'torch.Tensor' = 1.0 - (2.0 * (intersection * w).sum(0 if self.
batch else 1) + self.smooth_nr) / ((denominator * w).sum(0 if
self.batch else 1) + self.smooth_dr)
if self.reduction == LossReduction.MEAN.value:
f = torch.mean(f)
elif self.reduction == LossReduction.SUM.value:
f = torch.sum(f)
elif self.reduction == LossReduction.NONE.value:
pass
else:
raise ValueError(
f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].'
)
return f
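# Illustrative sketch (not part of the original module): exercising the reference
# loss above on random BNHW tensors of the shape produced by get_inputs() below.
def _generalized_dice_example():
    loss_fn = GeneralizedDiceLoss(w_type=Weight.SQUARE, reduction=LossReduction.MEAN)
    pred = torch.rand(4, 4, 4, 4)
    target = torch.rand(4, 4, 4, 4)
    return loss_fn(pred, target)  # scalar: per-item losses averaged over the batch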
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import collections
from typing import Optional
from typing import Union
from typing import Any
from typing import Callable
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tl.full([1], 1, tl.int32)
tmp3 = tmp2 / tmp1
tmp4 = libdevice.isinf(tmp3).to(tl.int1)
tmp5 = 0.0
tmp6 = tl.where(tmp4, tmp5, tmp3)
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_mul_reciprocal_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp4 * tmp4
tmp6 = tl.full([1], 1, tl.int32)
tmp7 = tmp6 / tmp5
tmp8 = tl.where(tmp2, tmp3, tmp7)
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_per_fused_index_put_max_3(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp0 = tl.full([1, 1], 0, tl.int32)
tmp1 = tmp0 == tmp0
tmp4 = tmp3 * tmp3
tmp5 = tl.full([1, 1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tl.where(tmp1, tmp2, tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = triton_helpers.max2(tmp8, 1)[:, None]
tmp11 = libdevice.isinf(tmp6).to(tl.int1)
tmp12 = tl.where(tmp11, tmp10, tmp7)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_5(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp0 = tl.full([1], 1, tl.int32)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (4 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_7(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (4 + r0), None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp0 = tl.full([1, 1], 1, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(4 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_8(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp0 = tl.full([1], 2, tl.int32)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (8 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_10(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (8 + r0), None)
tmp9 = tl.load(in_ptr1 + (4 + r0), None)
tmp10 = tl.load(in_ptr1 + (8 + r0), None)
tmp0 = tl.full([1, 1], 2, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 1, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(8 + r0, [XBLOCK, RBLOCK]), tmp13, None)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_11(in_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp4 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp0 = tl.full([1], 3, tl.int32)
tmp1 = tl.full([1], 2, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = libdevice.isinf(tmp5).to(tl.int1)
tmp7 = 0.0
tmp8 = tl.where(tmp6, tmp7, tmp5)
tl.store(out_ptr1 + (12 + x0), tmp8, xmask)
@triton.jit
def triton_poi_fused_12(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp3 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x1
tmp1 = tl.full([1], 3, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tl.where(tmp2, tmp3, tmp4)
tl.store(out_ptr0 + x2, tmp5, xmask)
@triton.jit
def triton_per_fused_index_put_max_13(in_ptr0, in_ptr1, out_ptr2, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp2 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (8 + r0), None)
tmp10 = tl.load(in_ptr1 + (12 + r0), None)
tmp0 = tl.full([1, 1], 3, tl.int32)
tmp1 = tmp0 == tmp0
tmp3 = tl.where(tmp1, tmp2, tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = triton_helpers.max2(tmp4, 1)[:, None]
tmp7 = tl.full([1, 1], 2, tl.int32)
tmp8 = tmp0 == tmp7
tmp11 = tl.where(tmp8, tmp9, tmp10)
tmp12 = libdevice.isinf(tmp11).to(tl.int1)
tmp13 = tl.where(tmp12, tmp6, tmp3)
tl.store(out_ptr2 + tl.broadcast_to(12 + r0, [XBLOCK, RBLOCK]), tmp13, None
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_14(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 12)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp6 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + 13)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp12 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + 14)
tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK])
tmp19 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + 15)
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp26 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr3 + 4 * r0, None, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr3 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 3, tl.int32)
tmp3 = tmp1 == tmp2
tmp7 = tl.where(tmp3, tmp5, tmp6)
tmp8 = tmp0 * tmp7
tmp13 = tl.where(tmp3, tmp11, tmp12)
tmp14 = tmp9 * tmp13
tmp15 = tmp8 + tmp14
tmp20 = tl.where(tmp3, tmp18, tmp19)
tmp21 = tmp16 * tmp20
tmp22 = tmp15 + tmp21
tmp27 = tl.where(tmp3, tmp25, tmp26)
tmp28 = tmp23 * tmp27
tmp29 = tmp22 + tmp28
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp7
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp13
tmp38 = tmp33 + tmp37
tmp41 = tmp39 + tmp40
tmp42 = tmp41 * tmp20
tmp43 = tmp38 + tmp42
tmp46 = tmp44 + tmp45
tmp47 = tmp46 * tmp27
tmp48 = tmp43 + tmp47
tmp49 = 2.0
tmp50 = tmp29 * tmp49
tmp51 = 1e-05
tmp52 = tmp50 + tmp51
tmp53 = tmp48 + tmp51
tmp54 = tmp52 / tmp53
tmp55 = 1.0
tmp56 = tmp55 - tmp54
tmp57 = tl.broadcast_to(tmp56, [XBLOCK, RBLOCK])
tmp59 = tl.sum(tmp57, 1)[:, None]
tmp60 = 4.0
tmp61 = tmp59 / tmp60
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp61, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1,
buf29, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_index_put_lift_fresh_1[grid(4)](buf1, buf2, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mul_reciprocal_2[grid(16)](buf2, buf1, buf4, 16,
XBLOCK=16, num_warps=1, num_stages=1)
triton_per_fused_index_put_max_3[grid(1)](buf2, buf1, buf4, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
del buf2
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_4[grid(16)](buf4, buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
triton_poi_fused_index_put_lift_fresh_5[grid(4)](buf4, buf7, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_6[grid(16)](buf7, buf11, 16, XBLOCK=16, num_warps=
1, num_stages=1)
triton_per_fused_index_put_max_7[grid(1)](buf7, buf4, buf11, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf14 = buf7
del buf7
triton_poi_fused_6[grid(16)](buf11, buf14, 16, XBLOCK=16, num_warps
=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_8[grid(4)](buf11, buf14, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf18 = buf4
del buf4
triton_poi_fused_9[grid(16)](buf14, buf18, 16, XBLOCK=16, num_warps
=1, num_stages=1)
triton_per_fused_index_put_max_10[grid(1)](buf14, buf11, buf18, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
buf21 = buf14
del buf14
triton_poi_fused_9[grid(16)](buf18, buf21, 16, XBLOCK=16, num_warps
=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_11[grid(4)](buf18, buf21, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf25 = buf11
del buf11
triton_poi_fused_12[grid(16)](buf21, buf25, 16, XBLOCK=16,
num_warps=1, num_stages=1)
triton_per_fused_index_put_max_13[grid(1)](buf21, buf18, buf25, 1,
4, XBLOCK=1, num_warps=2, num_stages=1)
del buf18
del buf21
buf31 = empty_strided_cuda((), (), torch.float32)
buf32 = buf31
del buf31
triton_per_fused_add_div_mean_mul_rsub_sum_14[grid(1)](buf32, buf0,
buf25, buf1, buf29, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf25
del buf29
return buf32,
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
Returns a copy of `tup` with `dim` values by either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndim < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class Weight(Enum):
"""
See also: :py:class:`monai.losses.dice.GeneralizedDiceLoss`
"""
SQUARE = 'square'
SIMPLE = 'simple'
UNIFORM = 'uniform'
class GeneralizedDiceLossNew(_Loss):
"""
Compute the generalised Dice loss defined in:
    Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, w_type: 'Union[Weight, str]'=Weight.
SQUARE, reduction: 'Union[LossReduction, str]'=LossReduction.MEAN,
smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch: 'bool'=False
) ->None:
"""
Args:
include_background: If False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, supply another callable to apply
                a different activation layer. Defaults to ``None``; for example:
                `other_act = torch.tanh`.
w_type: {``"square"``, ``"simple"``, ``"uniform"``}
Type of function to transform ground truth volume to a weight factor. Defaults to ``"square"``.
reduction: {``"none"``, ``"mean"``, ``"sum"``}
Specifies the reduction to apply to the output. Defaults to ``"mean"``.
- ``"none"``: no reduction will be applied.
- ``"mean"``: the sum of the output will be divided by the number of elements in the output.
- ``"sum"``: the output will be summed.
smooth_nr: a small constant added to the numerator to avoid zero.
smooth_dr: a small constant added to the denominator to avoid nan.
batch: whether to sum the intersection and union areas over the batch dimension before the dividing.
Defaults to False, intersection over union is computed from each item in the batch.
Raises:
TypeError: When ``other_act`` is not an ``Optional[Callable]``.
ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].
Incompatible values.
"""
super().__init__(reduction=LossReduction(reduction).value)
if other_act is not None and not callable(other_act):
raise TypeError(
f'other_act must be None or callable but is {type(other_act).__name__}.'
)
if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:
raise ValueError(
'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].'
)
self.include_background = include_background
self.to_onehot_y = to_onehot_y
self.sigmoid = sigmoid
self.softmax = softmax
self.other_act = other_act
w_type = Weight(w_type)
self.w_func: 'Callable' = torch.ones_like
if w_type == Weight.SIMPLE:
self.w_func = torch.reciprocal
elif w_type == Weight.SQUARE:
self.w_func = lambda x: torch.reciprocal(x * x)
self.smooth_nr = float(smooth_nr)
self.smooth_dr = float(smooth_dr)
self.batch = batch
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
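# Illustrative sketch (not part of the generated module): the compiled wrapper is
# called like the eager loss, but call() above assumes contiguous (4, 4, 4, 4)
# CUDA tensors (see its assert_size_stride checks and device guard).
def _compiled_generalized_dice_example():
    loss_fn = GeneralizedDiceLossNew()
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    return loss_fn(pred, target)  # scalar tensor computed by the fused Triton kernels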
| danielschulz/MONAI | GeneralizedDiceLoss | false | 1,811 | [
"Apache-2.0"
] | 0 | 54ef6e9e700f0de3d50184c0148f953be871a58e | https://github.com/danielschulz/MONAI/tree/54ef6e9e700f0de3d50184c0148f953be871a58e | import collections
import torch
import warnings
from typing import Optional
from typing import Union
from typing import Any
from typing import Callable
from typing import Tuple
import torch.nn
from torch.nn.modules.loss import _Loss
from enum import Enum
import collections.abc
def issequenceiterable(obj: 'Any') ->bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj,
str)
def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = vals,
return tuple(vals)
def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any,
...]:
"""
Returns a copy of `tup` with `dim` values by either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype:
'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor:
"""
For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]`
for `num_classes` N number of classes.
Example:
For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0.
Note that this will include the background label, thus a binary mask should be treated as having 2 classes.
"""
assert labels.dim() > 0, 'labels should have dim of 1 or more.'
if labels.ndim < dim + 1:
shape = ensure_tuple_size(labels.shape, dim + 1, 1)
labels = labels.reshape(*shape)
sh = list(labels.shape)
    assert sh[dim] == 1, 'labels should have a channel with length equal to one.'
sh[dim] = num_classes
o = torch.zeros(size=sh, dtype=dtype, device=labels.device)
labels = o.scatter_(dim=dim, index=labels.long(), value=1)
return labels
class LossReduction(Enum):
"""
See also:
- :py:class:`monai.losses.dice.DiceLoss`
- :py:class:`monai.losses.dice.GeneralizedDiceLoss`
- :py:class:`monai.losses.focal_loss.FocalLoss`
- :py:class:`monai.losses.tversky.TverskyLoss`
"""
NONE = 'none'
MEAN = 'mean'
SUM = 'sum'
class Weight(Enum):
"""
See also: :py:class:`monai.losses.dice.GeneralizedDiceLoss`
"""
SQUARE = 'square'
SIMPLE = 'simple'
UNIFORM = 'uniform'
class Model(_Loss):
"""
Compute the generalised Dice loss defined in:
    Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
loss function for highly unbalanced segmentations. DLMIA 2017.
Adapted from:
https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L279
"""
def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool'
=False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act:
'Optional[Callable]'=None, w_type: 'Union[Weight, str]'=Weight.
SQUARE, reduction: 'Union[LossReduction, str]'=LossReduction.MEAN,
smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch: 'bool'=False
) ->None:
"""
Args:
include_background: If False channel index 0 (background category) is excluded from the calculation.
to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.
sigmoid: If True, apply a sigmoid function to the prediction.
softmax: If True, apply a softmax function to the prediction.
            other_act: if you don't want to use `sigmoid` or `softmax`, supply another callable to apply
                a different activation layer. Defaults to ``None``; for example:
                `other_act = torch.tanh`.
squared_pred: use squared versions of targets and predictions in the denominator or no
# ... truncated (>4000 chars) for memory efficiency |
AngularPenaltySMLoss | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/fh/cfhnguw4v6uy4ysjg54ojclakwi3bj2lte6oqizl4rpf4lcxpiyp.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand_1), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/or/corp6yamhrkxi6z5xvdc7wpuh547n5nfztmiyxnusguactj7eidm.py
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
# Source node to ATen node mapping:
# getitem => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%permute_1, [%primals_2]), kwargs = {})
triton_poi_fused_index_1 = async_compile.triton('triton_poi_fused_index_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 64)
x0 = xindex % 16
x1 = (xindex // 16) % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (x0 + (16*tmp4) + (64*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/pa/cpa47iw26hf3y4ilyjcqcstvpgepahnuhhwn56meiqbef7mns2iv.py
# Topologically Sorted Source Nodes: [clamp, acos, add, cos, numerator], Original ATen: [aten.clamp, aten.acos, aten.add, aten.cos, aten.mul]
# Source node to ATen node mapping:
# acos => acos
# add => add
# clamp => clamp_max, clamp_min_2
# cos => cos
# numerator => mul
# Graph fragment:
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%diagonal, -0.9999999), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 0.9999999), kwargs = {})
# %acos : [num_users=1] = call_function[target=torch.ops.aten.acos.default](args = (%clamp_max,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%acos, 0.5), kwargs = {})
# %cos : [num_users=1] = call_function[target=torch.ops.aten.cos.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%cos, 64.0), kwargs = {})
triton_poi_fused_acos_add_clamp_cos_mul_2 = async_compile.triton('triton_poi_fused_acos_add_clamp_cos_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_acos_add_clamp_cos_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_acos_add_clamp_cos_mul_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (80*x1)), xmask)
tmp1 = -0.9999999
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = libdevice.acos(tmp4)
tmp6 = 0.5
tmp7 = tmp5 + tmp6
tmp8 = tl_math.cos(tmp7)
tmp9 = 64.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wf], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
triton_poi_fused_index_1.run(primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [clamp, acos, add, cos, numerator], Original ATen: [aten.clamp, aten.acos, aten.add, aten.cos, aten.mul]
triton_poi_fused_acos_add_clamp_cos_mul_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
return (buf3, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 80), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AngularPenaltySMLoss(nn.Module):
def __init__(self, in_features, out_features, loss_type='arcface', eps=
1e-07, s=None, m=None):
"""
Angular Penalty Softmax Loss
Three 'loss_types' available: ['arcface', 'sphereface', 'cosface']
These losses are described in the following papers:
ArcFace: https://arxiv.org/abs/1801.07698
SphereFace: https://arxiv.org/abs/1704.08063
CosFace/Ad Margin: https://arxiv.org/abs/1801.05599
"""
super(AngularPenaltySMLoss, self).__init__()
loss_type = loss_type.lower()
assert loss_type in ['arcface', 'sphereface', 'cosface']
if loss_type == 'arcface':
self.s = 64.0 if not s else s
self.m = 0.5 if not m else m
if loss_type == 'sphereface':
self.s = 64.0 if not s else s
self.m = 1.35 if not m else m
if loss_type == 'cosface':
self.s = 30.0 if not s else s
self.m = 0.4 if not m else m
self.loss_type = loss_type
self.in_features = in_features
self.out_features = out_features
self.fc = nn.Linear(in_features, out_features, bias=False)
self.eps = eps
def forward(self, x, labels):
"""
input shape (N, in_features)
"""
assert len(x) == len(labels)
assert torch.min(labels) >= 0
assert torch.max(labels) < self.out_features
        for W in self.fc.parameters():
            W = F.normalize(W, p=2, dim=1)  # note: F.normalize is not in-place; this rebinds the loop variable only
x = F.normalize(x, p=2, dim=1)
wf = self.fc(x)
if self.loss_type == 'cosface':
numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]
) - self.m)
if self.loss_type == 'arcface':
numerator = self.s * torch.cos(torch.acos(torch.clamp(torch.
diagonal(wf.transpose(0, 1)[labels]), -1.0 + self.eps, 1 -
self.eps)) + self.m)
if self.loss_type == 'sphereface':
numerator = self.s * torch.cos(self.m * torch.acos(torch.clamp(
torch.diagonal(wf.transpose(0, 1)[labels]), -1.0 + self.eps,
1 - self.eps)))
excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0
) for i, y in enumerate(labels)], dim=0)
denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s *
excl), dim=1)
L = numerator - torch.log(denominator)
return -torch.mean(L)
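# Illustrative sketch (not part of the original module): minimal usage with the
# documented (N, in_features) input shape; labels holds one class index per row.
def _angular_penalty_example():
    criterion = AngularPenaltySMLoss(in_features=4, out_features=4, loss_type='arcface')
    x = torch.rand(4, 4)                 # (N, in_features)
    labels = torch.tensor([0, 1, 2, 3])  # one class index per sample, < out_features
    return criterion(x, labels)          # scalar loss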
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_index_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x0 = xindex % 16
x1 = xindex // 16 % 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 16 * tmp4 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
@triton.jit
def triton_poi_fused_acos_add_clamp_cos_mul_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 80 * x1), xmask)
tmp1 = -0.9999999
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 0.9999999
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp5 = libdevice.acos(tmp4)
tmp6 = 0.5
tmp7 = tmp5 + tmp6
tmp8 = tl_math.cos(tmp7)
tmp9 = 64.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_index_1[grid(256)](primals_2, buf1, buf2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (4, 1, 16), torch.float32)
triton_poi_fused_acos_add_clamp_cos_mul_2[grid(64)](buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return buf3, reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (4, 4, 4), (4, 1, 80), 0)
class AngularPenaltySMLossNew(nn.Module):
def __init__(self, in_features, out_features, loss_type='arcface', eps=
1e-07, s=None, m=None):
"""
Angular Penalty Softmax Loss
Three 'loss_types' available: ['arcface', 'sphereface', 'cosface']
These losses are described in the following papers:
ArcFace: https://arxiv.org/abs/1801.07698
SphereFace: https://arxiv.org/abs/1704.08063
CosFace/Ad Margin: https://arxiv.org/abs/1801.05599
"""
super(AngularPenaltySMLossNew, self).__init__()
loss_type = loss_type.lower()
assert loss_type in ['arcface', 'sphereface', 'cosface']
if loss_type == 'arcface':
self.s = 64.0 if not s else s
self.m = 0.5 if not m else m
if loss_type == 'sphereface':
self.s = 64.0 if not s else s
self.m = 1.35 if not m else m
if loss_type == 'cosface':
self.s = 30.0 if not s else s
self.m = 0.4 if not m else m
self.loss_type = loss_type
self.in_features = in_features
self.out_features = out_features
self.fc = nn.Linear(in_features, out_features, bias=False)
self.eps = eps
def forward(self, input_0, input_1):
primals_3 = self.fc.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
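# Illustrative sketch (not part of the generated module): driving the compiled
# wrapper with the CUDA shapes asserted in call() above, i.e. a (4, 4, 4, 4)
# float input and a (4,) int64 label vector. Note forward() returns the first
# graph output, i.e. the fused s * cos(acos(clamp(.)) + m) "numerator" term.
def _compiled_angular_penalty_example():
    module = AngularPenaltySMLossNew(in_features=4, out_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    labels = torch.ones(4, dtype=torch.int64, device='cuda')
    return module(x, labels)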
| dayoungMM/Angular-Penalty-Softmax-Losses-Pytorch | AngularPenaltySMLoss | false | 1,812 | [
"MIT"
] | 0 | 5599f2e280b2af8d40e53727290eb797d18e7239 | https://github.com/dayoungMM/Angular-Penalty-Softmax-Losses-Pytorch/tree/5599f2e280b2af8d40e53727290eb797d18e7239 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_features, out_features, loss_type='arcface', eps=
1e-07, s=None, m=None):
"""
Angular Penalty Softmax Loss
Three 'loss_types' available: ['arcface', 'sphereface', 'cosface']
These losses are described in the following papers:
ArcFace: https://arxiv.org/abs/1801.07698
SphereFace: https://arxiv.org/abs/1704.08063
CosFace/Ad Margin: https://arxiv.org/abs/1801.05599
"""
super().__init__()
loss_type = loss_type.lower()
assert loss_type in ['arcface', 'sphereface', 'cosface']
if loss_type == 'arcface':
self.s = 64.0 if not s else s
self.m = 0.5 if not m else m
if loss_type == 'sphereface':
self.s = 64.0 if not s else s
self.m = 1.35 if not m else m
if loss_type == 'cosface':
self.s = 30.0 if not s else s
self.m = 0.4 if not m else m
self.loss_type = loss_type
self.in_features = in_features
self.out_features = out_features
self.fc = nn.Linear(in_features, out_features, bias=False)
self.eps = eps
def forward(self, x, labels):
"""
input shape (N, in_features)
"""
assert len(x) == len(labels)
assert torch.min(labels) >= 0
assert torch.max(labels) < self.out_features
        for W in self.fc.parameters():
            W = F.normalize(W, p=2, dim=1)  # note: F.normalize is not in-place; this rebinds the loop variable only
x = F.normalize(x, p=2, dim=1)
wf = self.fc(x)
if self.loss_type == 'cosface':
numerator = self.s * (torch.diagonal(wf.transpose(0, 1)[labels]
) - self.m)
if self.loss_type == 'arcface':
numerator = self.s * torch.cos(torch.acos(torch.clamp(torch.
diagonal(wf.transpose(0, 1)[labels]), -1.0 + self.eps, 1 -
self.eps)) + self.m)
if self.loss_type == 'sphereface':
numerator = self.s * torch.cos(self.m * torch.acos(torch.clamp(
torch.diagonal(wf.transpose(0, 1)[labels]), -1.0 + self.eps,
1 - self.eps)))
excl = torch.cat([torch.cat((wf[i, :y], wf[i, y + 1:])).unsqueeze(0
) for i, y in enumerate(labels)], dim=0)
denominator = torch.exp(numerator) + torch.sum(torch.exp(self.s *
excl), dim=1)
L = numerator - torch.log(denominator)
return -torch.mean(L)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [4, 4]
|
ToSEG | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
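# Note: in call() below this kernel is launched on primals_2 (the EqualLinear modulation
# weight), so the 0.5 constant corresponds to EqualLinear's equalized-LR scale,
# 1/sqrt(style_dim) * lr_mul = 1/sqrt(4) * 1 for this 4-dimensional style input.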
# kernel path: runs/run_shard_6/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/s3/cs3vevvlveudb7oguup5ljgcyslvygs2cnrc5347em4iypopundn.py
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_2 => mul_2
# weight => mul_3
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/r2/cr263a6gzji5hcuzutpzrubs2olns2ao2sa7aaaziqrb7stxhlqd.py
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out_3 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_6), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, weight], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_5, buf2, buf3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf4, (1, 16, 4, 4), (256, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf5, primals_6, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (16, 4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 4, 1, 1), (16, 4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import math
import torch
import warnings
import os
import traceback
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.cpp_extension
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *
rest_dim), negative_slope=0.2) * scale
else:
return F.leaky_relu(input, negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def _init():
global _inited, _plugin
if not _inited:
sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
try:
_plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=
sources, extra_cuda_cflags=['--use_fast_math'])
except:
warnings.warn(
"""Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:
"""
+ traceback.format_exc())
return _plugin is not None
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
key = upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f):
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0,
padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1,
padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy,
0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy):
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [fw - padx0 - 1, iw * upx - ow * downx + padx0 - upx + 1,
fh - pady0 - 1, ih * upy - oh * downy + pady0 - upy + 1]
dx = None
df = None
if ctx.needs_input_grad[0]:
dx = _upfirdn2d_cuda(up=down, down=up, padding=p,
flip_filter=not flip_filter, gain=gain).apply(dy, f)
assert not ctx.needs_input_grad[1]
return dx, df
_upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
return Upfirdn2dCuda
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1,
impl='cuda'):
"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain)
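# Minimal pure-PyTorch sketch of the pad -> upsample -> FIR filter -> downsample sequence
# documented in upfirdn2d() above, assuming a 2D (non-separable) filter and simplified gain
# handling. The helper name `_upfirdn2d_ref_sketch` is hypothetical -- it is NOT the
# `_upfirdn2d_ref` fallback referenced above, just an illustration of the same steps.
def _upfirdn2d_ref_sketch(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    batch, channels, in_h, in_w = x.shape
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    # 1. Upsample by inserting upx-1 / upy-1 zeros after each pixel.
    y = x.reshape(batch, channels, in_h, 1, in_w, 1)
    y = F.pad(y, [0, upx - 1, 0, 0, 0, upy - 1])
    y = y.reshape(batch, channels, in_h * upy, in_w * upx)
    # 2. Pad with zeros (negative padding crops).
    y = F.pad(y, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    y = y[:, :, max(-pady0, 0):y.shape[2] - max(-pady1, 0),
          max(-padx0, 0):y.shape[3] - max(-padx1, 0)]
    # 3. Convolve each channel with the FIR filter (flip => true convolution).
    w = f * gain
    if not flip_filter:
        w = w.flip([0, 1])
    w = w.to(y.dtype)[None, None].repeat(channels, 1, 1, 1)
    y = F.conv2d(y, w, groups=channels)
    # 4. Downsample by keeping every downy-th / downx-th pixel.
    return y[:, :, ::downy, ::downx]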
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, bias, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
if bias:
grad_bias = grad_input.sum(dim).detach()
else:
grad_bias = empty
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
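# Minimal sketch, assuming the plain-convolution path (no upsample/downsample): the
# groups=batch trick in ModulatedConv2d.forward() above is equivalent to modulating the
# kernel per sample and convolving each sample in a Python loop. The helper name is
# hypothetical and for illustration only.
def _modulated_conv2d_loop_sketch(conv, input, style):
    batch, in_channel, _height, _width = input.shape
    style = conv.modulation(style).view(batch, 1, in_channel, 1, 1)
    weight = conv.scale * conv.weight * style                    # (B, Cout, Cin, k, k)
    if conv.demodulate:
        demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
        weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
    return torch.cat([F.conv2d(input[i:i + 1], weight[i], padding=conv.padding)
                      for i in range(batch)], dim=0)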
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
self.pad)
return out
class ToSEG(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, upsample=True,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, out_channel, 1, style_dim,
demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input, style, skip=None):
out = self.conv(input, style)
out = out + self.bias
if skip is not None:
skip = self.upsample(skip)
out = out + skip
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'style_dim': 4}]
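# Usage sketch, assuming the shapes from get_inputs()/get_init_inputs() above; the skip
# branch is omitted because it requires the external upfirdn2d CUDA plugin.
if __name__ == "__main__":
    _to_seg = ToSEG(in_channel=4, out_channel=4, style_dim=4)
    _out = _to_seg(torch.rand(4, 4, 4, 4), torch.rand(4, 4))  # modulated 1x1 conv + bias
    print(_out.shape)  # torch.Size([4, 4, 4, 4])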
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import math
import warnings
import os
import traceback
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.cpp_extension
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 16
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x4, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 1, 1), (16, 4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
triton_poi_fused_mul_2[grid(64)](primals_5, buf2, buf3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4,
1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf4, (1, 16, 4, 4), (256, 16, 4, 1))
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_3[grid(256)](buf5, primals_6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_6
return buf5, primals_4, primals_5, buf2, reinterpret_tensor(buf3, (16,
4, 1, 1), (4, 1, 1, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4,
4), (256, 16, 4, 1), 0)
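# In ToSEGNew.forward() below: triton_poi_fused_mul_0/_1 apply EqualLinear's weight/bias
# scaling, extern_kernels.addmm computes the style modulation, triton_poi_fused_mul_2 folds
# the style into the 1x1 conv weight (demodulate=False), the grouped convolution applies one
# kernel per sample, and triton_poi_fused_add_3 adds the output bias in place.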
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *
rest_dim), negative_slope=0.2) * scale
else:
return F.leaky_relu(input, negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def _init():
global _inited, _plugin
if not _inited:
sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
try:
_plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=
sources, extra_cuda_cflags=['--use_fast_math'])
except:
warnings.warn(
"""Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:
"""
+ traceback.format_exc())
return _plugin is not None
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
key = upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f):
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0,
padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1,
padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy,
0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy):
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [fw - padx0 - 1, iw * upx - ow * downx + padx0 - upx + 1,
fh - pady0 - 1, ih * upy - oh * downy + pady0 - upy + 1]
dx = None
df = None
if ctx.needs_input_grad[0]:
dx = _upfirdn2d_cuda(up=down, down=up, padding=p,
flip_filter=not flip_filter, gain=gain).apply(dy, f)
assert not ctx.needs_input_grad[1]
return dx, df
_upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
return Upfirdn2dCuda
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1,
impl='cuda'):
"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding,
flip_filter=flip_filter, gain=gain)
def upsample(in_tens, out_H=64):
in_H = in_tens.shape[2]
scale_factor = 1.0 * out_H / in_H
return nn.Upsample(scale_factor=scale_factor, mode='bilinear',
align_corners=False)(in_tens)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, bias, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
if bias:
grad_bias = grad_input.sum(dim).detach()
else:
grad_bias = empty
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
ctx.bias = bias is not None
if bias is None:
bias = empty
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.bias, ctx.negative_slope, ctx.scale)
if not ctx.bias:
grad_bias = None
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
self.pad)
return out
class ToSEGNew(nn.Module):
def __init__(self, in_channel, out_channel, style_dim, upsample=True,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = ModulatedConv2d(in_channel, out_channel, 1, style_dim,
demodulate=False)
self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
def forward(self, input_0, input_1):
primals_6 = self.bias
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| crobbins327/semanticGAN_WSI | ToSEG | false | 1,813 | [
"BSD-2-Clause",
"MIT"
] | 0 | 4046ddc822f463e03952402247f79d540bf7be95 | https://github.com/crobbins327/semanticGAN_WSI/tree/4046ddc822f463e03952402247f79d540bf7be95 | from torch.autograd import Function
import math
import torch
import warnings
import os
import traceback
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.cpp_extension
def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
if bias is not None:
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *
rest_dim), negative_slope=0.2) * scale
else:
return F.leaky_relu(input, negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def _init():
global _inited, _plugin
if not _inited:
sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
try:
_plugin = custom_ops.get_plugin('upfirdn2d_plugin', sources=
sources, extra_cuda_cflags=['--use_fast_math'])
except:
warnings.warn(
"""Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:
"""
+ traceback.format_exc())
return _plugin is not None
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
fw = f.shape[-1]
fh = f.shape[0]
with misc.suppress_tracer_warnings():
fw = int(fw)
fh = int(fh)
misc.assert_shape(f, [fh, fw][:f.ndim])
assert fw >= 1 and fh >= 1
return fw, fh
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
"""Fast CUDA implementation of `upfirdn2d()` using custom ops.
"""
upx, upy = _parse_scaling(up)
downx, downy = _parse_scaling(down)
padx0, padx1, pady0, pady1 = _parse_padding(padding)
key = upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain
if key in _upfirdn2d_cuda_cache:
return _upfirdn2d_cuda_cache[key]
class Upfirdn2dCuda(torch.autograd.Function):
@staticmethod
def forward(ctx, x, f):
assert isinstance(x, torch.Tensor) and x.ndim == 4
if f is None:
f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
y = x
if f.ndim == 2:
y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0,
padx1, pady0, pady1, flip_filter, gain)
else:
y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1,
padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy,
0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
ctx.save_for_backward(f)
ctx.x_shape = x.shape
return y
@staticmethod
def backward(ctx, dy):
f, = ctx.saved_tensors
_, _, ih, iw = ctx.x_shape
_, _, oh, ow = dy.shape
fw, fh = _get_filter_size(f)
p = [fw - padx0 -
# ... truncated (>4000 chars) for memory efficiency |
ResidualAttentionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
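# This kernel computes, for each of the 16 rows, the mean and rsqrt(var + 1e-05) over the
# last dimension (size 4) -- the statistics half of the fused LayerNorm. The affine
# normalization itself is applied by triton_poi_fused_native_layer_norm_1 below.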
# kernel path: runs/run_shard_6/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ov/covnoswvamrduqnlpu7ioajrnn3hhte55stjgoorf2z6wjvkkv75.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_3, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + (3*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + (3*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/a3/ca3gcd62vddrpdf7wel6bpef44cv6vzsauaaigftsutida5ifj66.py
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
# Source node to ATen node mapping:
# mul => mul_2
# softmax => amax, clone, exp, sub_1, sum_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_2, 1.0), kwargs = {})
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_mul_3 = async_compile.triton('triton_poi_fused__softmax_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (3*x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (3*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp15, xmask)
tl.store(out_ptr2 + (x4), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ry/crysvnpehef7t7qavdv4tjejxu2fplwpz5t7ziu4zobu2iuwbxgh.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, clone, div, exp, sub_1, sum_1
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%view_3,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + (4*x0) + (16*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), tmp7, xmask)
''', device_str='cuda')
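# Editor's note (added for illustration, not part of the generated output): the two kernels
# above jointly realize a numerically stable softmax over the attention scores. The first
# computes the row-wise max (amax) and the sum of exp(x - max); the second divides each
# exp(x - max) by that sum. A minimal eager-mode sketch of the same computation:
def _softmax_reference(scores):
    # scores: (..., n_ctx); subtract the max before exp to avoid overflow, then normalize
    m = scores.max(dim=-1, keepdim=True).values
    e = (scores - m).exp()
    return e / e.sum(dim=-1, keepdim=True)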
# kernel path: runs/run_shard_6/inductor_cache/vt/cvtpzj4f2hmyerpchbm5i4eart4tas2pfh5e56t2solbcays5o7r.py
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# einsum_1 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_6,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (3*x1) + (12*x0) + (48*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + (3*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4k/c4kxxzyxk45cygbwnqkt5vb2udxol67wuotkh6zmuwsinb63uprn.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_2 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%view_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wy/cwy4kxyjt4etqj66cdnstjhqnyeqdu5ftn4ptejfyamzau7n3xqh.py
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => var_mean_1
# x_2 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (2))
tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (3))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2f/c2fol3uyk6gyusnfl52autx5hdhf3lnnchrftxvbnmmxfs5li5o4.py
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm_1 => add_4, add_5, mul_5, mul_6, rsqrt_1, sub_2
# x_2 => add_2
# x_3 => add_3
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_6), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_8), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
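# Editor's note (added for illustration, not part of the generated output): kernels 7 and 8
# above fuse the residual add (input + projected attention + bias) with LayerNorm statistics,
# using the biased variance (correction=0) and eps = 1e-05. A hedged eager-mode sketch:
def _layer_norm_reference(x, weight, bias, eps=1e-05):
    # normalize over the last dimension, matching aten.native_layer_norm
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias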
# kernel path: runs/run_shard_6/inductor_cache/7s/c7sh7255dxwfgfvzmp2g3slml6c3g5jdyjpq3rxua55pu5gg2wfd.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# gelu => add_6, erf, mul_7, mul_8, mul_9
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.5), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_6), kwargs = {})
triton_poi_fused_gelu_9 = async_compile.triton('triton_poi_fused_gelu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
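# Editor's note (added for illustration, not part of the generated output): this kernel is the
# exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))), i.e. the default of torch.nn.GELU.
# A hedged eager-mode sketch:
def _gelu_reference(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))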
# kernel path: runs/run_shard_6/inductor_cache/7n/c7nsuy663pmek7olaj7xnomqpsspowavb2zqwzrjkaz42y7dnrtc.py
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_2
# x_3 => add_3
# x_4 => add_7
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_10, %primals_7), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %add_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_14), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16, ), (1, ))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 64, grid=grid(64), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf3, primals_5, buf5, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
# Topologically Sorted Source Nodes: [mul, softmax], Original ATen: [aten.mul, aten._softmax]
triton_poi_fused__softmax_mul_3.run(buf3, primals_5, buf5, buf4, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf4, buf5, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf3, primals_5, buf9, 64, grid=grid(64), stream=stream0)
del buf3
del primals_5
buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [einsum_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf10, buf11, 16, 4, grid=grid(16, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf12, primals_7, buf13, buf14, 16, grid=grid(16), stream=stream0)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3, layer_norm_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_1, buf12, primals_7, buf13, buf14, primals_8, primals_9, buf15, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
triton_poi_fused_gelu_9.run(buf16, buf17, 256, grid=grid(256), stream=stream0)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0), reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3, x_4], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf19, primals_1, buf12, primals_7, primals_13, 64, grid=grid(64), stream=stream0)
del primals_13
return (buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16, 1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1, 1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0), primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16, 1, 4), (4, 1, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch as th
import torch.nn as nn
class LayerNorm(nn.LayerNorm):
"""
Implementation that supports fp16 inputs but fp32 gains/biases.
"""
def forward(self, x: 'th.Tensor'):
return super().forward(x.float())
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
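# Editor's note (illustrative, not part of the original source): multiplying both q and k by
# 1 / sqrt(sqrt(attn_ch)) applies the standard 1 / sqrt(attn_ch) attention scaling split
# symmetrically across the two operands, which can help keep intermediate magnitudes in range
# for fp16. A hedged sanity-check sketch; the helper name is hypothetical:
def _check_symmetric_scale(attn_ch=16):
    q, k = th.randn(2, 8, attn_ch), th.randn(2, 8, attn_ch)
    s = 1 / math.sqrt(math.sqrt(attn_ch))
    lhs = (q * s) @ (k * s).transpose(-2, -1)
    rhs = (q @ k.transpose(-2, -1)) / math.sqrt(attn_ch)
    return th.allclose(lhs, rhs, atol=1e-05)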
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
class MLP(nn.Module):
def __init__(self, width):
super().__init__()
self.width = width
self.c_fc = nn.Linear(width, width * 4)
self.c_proj = nn.Linear(width * 4, width)
self.gelu = nn.GELU()
def forward(self, x):
return self.c_proj(self.gelu(self.c_fc(x)))
class ResidualAttentionBlock(nn.Module):
def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
super().__init__()
self.attn = MultiheadAttention(n_ctx, width, heads)
self.ln_1 = LayerNorm(width)
self.mlp = MLP(width)
self.ln_2 = LayerNorm(width)
def forward(self, x: 'th.Tensor'):
x = x + self.attn(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_ctx': 4, 'width': 4, 'heads': 4}]
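# Hedged usage sketch (added for illustration, not part of the original source):
if __name__ == "__main__":
    _block = ResidualAttentionBlock(n_ctx=4, width=4, heads=4)
    _out = _block(torch.rand(4, 4, 4))
    print(_out.shape)  # expected: torch.Size([4, 4, 4])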
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch as th
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (1 + 3 * x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (1 + 3 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + 3 * x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 3 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (4 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr2 + (8 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr2 + (12 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp4 * tmp7
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp4 * tmp10
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp14 = tmp4 * tmp13
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp16 = tmp6 - tmp15
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp8 - tmp15
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp15
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tmp24 = tmp14 - tmp15
tmp25 = tl_math.exp(tmp24)
tmp26 = tmp23 + tmp25
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp15, xmask)
tl.store(out_ptr2 + x4, tmp26, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1 + 4 * x0 + 16 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = tmp5 / tmp6
tl.store(out_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), tmp7, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 3 * x1 + 12 * x0 + 48 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (2 + 3 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr2 + 2)
    tmp16 = tl.broadcast_to(tmp15, [XBLOCK])
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr2 + 3)
    tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp22 = tl.load(in_ptr2 + 3)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp10 = tmp7 + tmp9
tmp11 = tmp6 + tmp10
tmp12 = tmp5 + tmp11
tmp17 = tmp14 + tmp16
tmp18 = tmp13 + tmp17
tmp19 = tmp12 + tmp18
tmp24 = tmp21 + tmp23
tmp25 = tmp20 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tmp29 = tmp5 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = tmp11 - tmp28
tmp32 = tmp31 * tmp31
tmp33 = tmp30 + tmp32
tmp34 = tmp18 - tmp28
tmp35 = tmp34 * tmp34
tmp36 = tmp33 + tmp35
tmp37 = tmp25 - tmp28
tmp38 = tmp37 * tmp37
tmp39 = tmp36 + tmp38
tmp40 = tmp39 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp40, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp6 = tmp4 - tmp5
tmp8 = 1e-05
tmp9 = tmp7 + tmp8
tmp10 = libdevice.rsqrt(tmp9)
tmp11 = tmp6 * tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 + tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
@triton.jit
def triton_poi_fused_gelu_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp4 + tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (16, 4), (4, 1))
assert_size_stride(primals_11, (16,), (1,))
assert_size_stride(primals_12, (4, 16), (16, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_1, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 12), (1, 4), 0), out=buf3)
buf5 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_mul_2[grid(64)](buf3, primals_5, buf5, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 1, 4, 64), torch.float32)
triton_poi_fused__softmax_mul_3[grid(64)](buf3, primals_5, buf5,
buf4, buf6, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(256)](buf4, buf5, buf6, buf7, buf8,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4, 4, 1, 1), (16, 4, 1, 1, 1), 0)
del buf7
triton_poi_fused_clone_5[grid(64)](buf3, primals_5, buf9, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del primals_5
buf10 = reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 1), 0)
del buf6
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf9, (16, 4, 1), (4, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_6[grid(16, 4)](buf10, buf11, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (16, 4), (4, 1), 0)
del buf10
extern_kernels.mm(reinterpret_tensor(buf11, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf12)
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
primals_7, buf13, buf14, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_8[grid(64)](primals_1, buf12,
primals_7, buf13, buf14, primals_8, primals_9, buf15, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf15, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf16)
del primals_11
buf17 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_gelu_9[grid(256)](buf16, buf17, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf18 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 16), (16, 1), 0),
reinterpret_tensor(primals_12, (16, 4), (1, 16), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4), (16, 4, 1), 0)
del buf18
triton_poi_fused_add_10[grid(64)](buf19, primals_1, buf12,
primals_7, primals_13, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_13
return buf19, primals_1, primals_7, primals_8, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4, 4, 1, 1), (16,
1, 4, 1, 1), 0), reinterpret_tensor(buf5, (4, 4, 1, 4, 1), (16, 1,
1, 4, 1), 0), buf8, reinterpret_tensor(buf11, (16, 4), (4, 1), 0
), buf12, reinterpret_tensor(buf15, (16, 4), (4, 1), 0
), buf16, reinterpret_tensor(buf17, (16, 16), (16, 1), 0
), primals_12, primals_10, primals_6, reinterpret_tensor(buf9, (16,
1, 4), (4, 1, 1), 0), primals_4
class LayerNorm(nn.LayerNorm):
"""
Implementation that supports fp16 inputs but fp32 gains/biases.
"""
def forward(self, x: 'th.Tensor'):
return super().forward(x.float())
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
class MLP(nn.Module):
def __init__(self, width):
super().__init__()
self.width = width
self.c_fc = nn.Linear(width, width * 4)
self.c_proj = nn.Linear(width * 4, width)
self.gelu = nn.GELU()
def forward(self, x):
return self.c_proj(self.gelu(self.c_fc(x)))
class ResidualAttentionBlockNew(nn.Module):
def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
super().__init__()
self.attn = MultiheadAttention(n_ctx, width, heads)
self.ln_1 = LayerNorm(width)
self.mlp = MLP(width)
self.ln_2 = LayerNorm(width)
def forward(self, input_0):
primals_4 = self.attn.c_qkv.weight
primals_5 = self.attn.c_qkv.bias
primals_6 = self.attn.c_proj.weight
primals_2 = self.attn.c_proj.bias
primals_3 = self.ln_1.weight
primals_7 = self.ln_1.bias
primals_10 = self.mlp.c_fc.weight
primals_11 = self.mlp.c_fc.bias
primals_12 = self.mlp.c_proj.weight
primals_8 = self.mlp.c_proj.bias
primals_9 = self.ln_2.weight
primals_13 = self.ln_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| dashstander/glide-text2im | ResidualAttentionBlock | false | 1,814 | ["MIT"] | 0 | 58f03a871ee0567e27fccc40df98203e675a9b8e | https://github.com/dashstander/glide-text2im/tree/58f03a871ee0567e27fccc40df98203e675a9b8e | import math
import torch
import torch as th
import torch.nn as nn
class LayerNorm(nn.LayerNorm):
"""
Implementation that supports fp16 inputs but fp32 gains/biases.
"""
def forward(self, x: 'th.Tensor'):
return super().forward(x.float())
class QKVMultiheadAttention(nn.Module):
def __init__(self, n_heads: 'int', n_ctx: 'int'):
super().__init__()
self.n_heads = n_heads
self.n_ctx = n_ctx
def forward(self, qkv):
bs, n_ctx, width = qkv.shape
attn_ch = width // self.n_heads // 3
scale = 1 / math.sqrt(math.sqrt(attn_ch))
qkv = qkv.view(bs, n_ctx, self.n_heads, -1)
q, k, v = th.split(qkv, attn_ch, dim=-1)
weight = th.einsum('bthc,bshc->bhts', q * scale, k * scale)
wdtype = weight.dtype
weight = th.softmax(weight.float(), dim=-1).type(wdtype)
return th.einsum('bhts,bshc->bthc', weight, v).reshape(bs, n_ctx, -1)
class MultiheadAttention(nn.Module):
def __init__(self, n_ctx, width, heads):
super().__init__()
self.n_ctx = n_ctx
self.width = width
self.heads = heads
self.c_qkv = nn.Linear(width, width * 3)
self.c_proj = nn.Linear(width, width)
self.attention = QKVMultiheadAttention(heads, n_ctx)
def forward(self, x):
x = self.c_qkv(x)
x = self.attention(x)
x = self.c_proj(x)
return x
class MLP(nn.Module):
def __init__(self, width):
super().__init__()
self.width = width
self.c_fc = nn.Linear(width, width * 4)
self.c_proj = nn.Linear(width * 4, width)
self.gelu = nn.GELU()
def forward(self, x):
return self.c_proj(self.gelu(self.c_fc(x)))
class Model(nn.Module):
def __init__(self, n_ctx: 'int', width: 'int', heads: 'int'):
super().__init__()
self.attn = MultiheadAttention(n_ctx, width, heads)
self.ln_1 = LayerNorm(width)
self.mlp = MLP(width)
self.ln_2 = LayerNorm(width)
def forward(self, x: 'th.Tensor'):
x = x + self.attn(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
FilterResponseNorm_layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/u2/cu2txft3g3dspulmchkg3fqkleaipbmm7ejo5tfpnmqjxvaiowxt.py
# Topologically Sorted Source Nodes: [square, nu2, mul, add, sqrt, x, mul_1, y, z], Original ATen: [aten.pow, aten.mean, aten.mul, aten.add, aten.sqrt, aten.div, aten.maximum]
# Source node to ATen node mapping:
# add => add
# mul => mul
# mul_1 => mul_1
# nu2 => mean
# sqrt => sqrt
# square => pow_1
# x => div
# y => add_1
# z => maximum
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2, 3], True), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-06), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %sqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%add_1, %primals_4), kwargs = {})
triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0 = async_compile.triton('triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp11 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp12 = 1.0
tmp13 = tmp0 * tmp12
tmp14 = tmp13 / tmp10
tmp15 = tmp11 * tmp14
tmp17 = tmp15 + tmp16
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp19, xmask)
''', device_str='cuda')
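# NOTE (sketch, not part of the generated wrapper): the persistent-reduction kernel
# above folds the whole FilterResponseNorm forward into one pass per (batch, channel)
# slice of 16 spatial elements: nu2 = mean(x**2), x_hat = x / sqrt(nu2 + 1e-06),
# gamma * x_hat + beta, then an elementwise max with tau. It also saves
# sqrt(nu2 + eps) (buf1) for the backward pass. A minimal eager-mode equivalent,
# derived from the "Graph fragment" comments; the helper name `_frn_reference`
# is ours, not part of this file.
def _frn_reference(x, gamma, beta, tau, eps=1e-06):
    import torch
    nu2 = torch.mean(torch.square(x), dim=[2, 3], keepdim=True)   # mean over H, W
    x_hat = x / torch.sqrt(nu2 + eps)                              # normalize
    return torch.maximum(gamma * x_hat + beta, tau)                # affine + thresholded max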
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [square, nu2, mul, add, sqrt, x, mul_1, y, z], Original ATen: [aten.pow, aten.mean, aten.mul, aten.add, aten.sqrt, aten.div, aten.maximum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0.run(buf1, primals_1, primals_2, primals_3, primals_4, buf2, 16, 16, grid=grid(16), stream=stream0)
return (buf2, primals_1, primals_2, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FilterResponseNorm_layer(nn.Module):
def __init__(self, num_filters, eps=1e-06):
super(FilterResponseNorm_layer, self).__init__()
self.eps = eps
par_shape = 1, num_filters, 1, 1
self.tau = torch.nn.Parameter(torch.zeros(par_shape))
self.beta = torch.nn.Parameter(torch.zeros(par_shape))
self.gamma = torch.nn.Parameter(torch.ones(par_shape))
def forward(self, x):
nu2 = torch.mean(torch.square(x), dim=[2, 3], keepdim=True)
x = x * 1 / torch.sqrt(nu2 + self.eps)
y = self.gamma * x + self.beta
z = torch.max(y, self.tau)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_filters': 4}]
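# A minimal usage sketch of the reference module above (eager mode, CPU); it only
# wires get_init_inputs()/get_inputs() together and is not part of the original source.
def _run_reference_layer():
    args, kwargs = get_init_inputs()
    layer = FilterResponseNorm_layer(*args, **kwargs)   # num_filters=4
    out = layer(*get_inputs())                          # (4, 4, 4, 4) -> (4, 4, 4, 4)
    return out.shape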
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp11 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp8 = 1e-06
tmp9 = tmp7 + tmp8
tmp10 = libdevice.sqrt(tmp9)
tmp12 = 1.0
tmp13 = tmp0 * tmp12
tmp14 = tmp13 / tmp10
tmp15 = tmp11 * tmp14
tmp17 = tmp15 + tmp16
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_maximum_mean_mul_pow_sqrt_0[grid(16)](buf1,
primals_1, primals_2, primals_3, primals_4, buf2, 16, 16,
XBLOCK=8, num_warps=2, num_stages=1)
return buf2, primals_1, primals_2, primals_3, primals_4, buf1
class FilterResponseNorm_layerNew(nn.Module):
def __init__(self, num_filters, eps=1e-06):
super(FilterResponseNorm_layerNew, self).__init__()
self.eps = eps
par_shape = 1, num_filters, 1, 1
self.tau = torch.nn.Parameter(torch.zeros(par_shape))
self.beta = torch.nn.Parameter(torch.zeros(par_shape))
self.gamma = torch.nn.Parameter(torch.ones(par_shape))
def forward(self, input_0):
primals_2 = self.tau
primals_3 = self.beta
primals_4 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
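# Hedged sanity-check sketch (ours, not part of the generated file): compares the
# Triton-backed module above against the plain eager formula on one random input.
# It assumes a CUDA device and the fixed (4, 4, 4, 4) input shape required by the
# assert_size_stride guards in call().
def _check_against_eager(seed=0):
    torch.manual_seed(seed)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    mod = FilterResponseNorm_layerNew(num_filters=4).cuda()
    y_triton = mod(x)
    nu2 = torch.mean(torch.square(x), dim=[2, 3], keepdim=True)
    y_eager = torch.maximum(mod.gamma * (x / torch.sqrt(nu2 + mod.eps)) + mod.beta, mod.tau)
    return torch.allclose(y_triton, y_eager, atol=1e-05)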
| deebuls/pytorch-cifar | FilterResponseNorm_layer | false | 1,815 | [
"MIT"
] | 0 | c6d9b16eeb00418d8c4f4f4c1e97f141c1f7d198 | https://github.com/deebuls/pytorch-cifar/tree/c6d9b16eeb00418d8c4f4f4c1e97f141c1f7d198 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_filters, eps=1e-06):
super().__init__()
self.eps = eps
par_shape = 1, num_filters, 1, 1
self.tau = torch.nn.Parameter(torch.zeros(par_shape))
self.beta = torch.nn.Parameter(torch.zeros(par_shape))
self.gamma = torch.nn.Parameter(torch.ones(par_shape))
def forward(self, x):
nu2 = torch.mean(torch.square(x), dim=[2, 3], keepdim=True)
x = x * 1 / torch.sqrt(nu2 + self.eps)
y = self.gamma * x + self.beta
z = torch.max(y, self.tau)
return z
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
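# Minimal smoke-test sketch for the plain PyTorch reference above (ours, not from the
# original repo): builds the model from get_init_inputs() and runs one CPU forward pass.
def _smoke_test():
    model = Model(*get_init_inputs())   # num_filters=4
    y = model(*get_inputs())            # (4, 4, 4, 4) -> (4, 4, 4, 4)
    return y.shape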
|
TSA_Fusion | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/g4/cg4ol27qewbsblsqindyqcoqjbv3ocrgpr3ueqortiqfpei53c5z.py
# Topologically Sorted Source Nodes: [clone], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# clone => clone
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 1024
x1 = (xindex // 1024)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2048 + x0 + (5120*x1)), None)
tl.store(out_ptr0 + (x2), tmp0, None)
''', device_str='cuda')
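# NOTE (sketch, not part of the generated wrapper): the clone kernel above copies the
# center frame of the 5-frame input into a contiguous buffer. The 2048 offset is frame
# index 2 of 5 (64 channels * 4 * 4 spatial = 1024 floats per frame, 5120 per batch
# element). A minimal eager-mode equivalent; the helper name is ours.
def _take_center_frame(aligned_fea):
    # aligned_fea: (B, N, C, H, W) with N = 5; the reference frame is the center one
    return aligned_fea[:, aligned_fea.size(1) // 2].clone()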
# kernel path: runs/run_shard_6/inductor_cache/vl/cvlmoerlrbmnehdmef3bbge55w43r7yeghhzhrdh2czvthybjclb.py
# Topologically Sorted Source Nodes: [emb_ref], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# emb_ref => convolution
# Graph fragment:
# %convolution : [num_users=6] = call_function[target=torch.ops.aten.convolution.default](args = (%clone, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
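# NOTE (sketch): the convolution itself is dispatched to an extern kernel; the Triton
# kernel above only folds the per-channel bias into the conv output in place (one bias
# value per 64 output channels). A minimal eager equivalent of that epilogue, assuming
# `out` is the NCHW conv result and `bias` the per-channel bias (names are ours):
def _add_conv_bias(out, bias):
    return out + bias.view(1, -1, 1, 1)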
# kernel path: runs/run_shard_6/inductor_cache/in/cinfzueyyyniakvywqmnsv3rq6nal3xyzxhsxtrblzrtqg3xc4w6.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6g/c6g5w2xfkgqh3jcdcbb55u57ppgqo3xomq7ralymlbohawjrlbf7.py
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1, sum_2, mul_2, sum_3, mul_3, sum_4, mul_4, sum_5, cat], Original ATen: [aten.mul, aten.sum, aten.cat]
# Source node to ATen node mapping:
# cat => cat
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# sum_4 => sum_4
# sum_5 => sum_5
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_1, %convolution), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_2, %convolution), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_3, %convolution), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [1]), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_4, %convolution), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [1]), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select_5, %convolution), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3, %unsqueeze_4], 1), kwargs = {})
triton_per_fused_cat_mul_sum_3 = async_compile.triton('triton_per_fused_cat_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 64],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 5, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_mul_sum_3(in_ptr0, in_ptr1, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (5120*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (16*r2) + (1024*x1)), xmask, other=0.0)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + (16*r2) + (5120*x1)), xmask, other=0.0)
tmp13 = tl.load(in_ptr0 + (2048 + x0 + (16*r2) + (5120*x1)), xmask, other=0.0)
tmp19 = tl.load(in_ptr0 + (3072 + x0 + (16*r2) + (5120*x1)), xmask, other=0.0)
tmp25 = tl.load(in_ptr0 + (4096 + x0 + (16*r2) + (5120*x1)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp8 = tmp7 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tmp13 * tmp1
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp19 * tmp1
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp26 = tmp25 * tmp1
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr5 + (x0 + (80*x1)), tmp6, xmask)
tl.store(out_ptr6 + (x0 + (80*x1)), tmp12, xmask)
tl.store(out_ptr7 + (x0 + (80*x1)), tmp18, xmask)
tl.store(out_ptr8 + (x0 + (80*x1)), tmp24, xmask)
tl.store(out_ptr9 + (x0 + (80*x1)), tmp30, xmask)
''', device_str='cuda')
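# NOTE (sketch, not part of the generated wrapper): the reduction kernel above computes,
# for each of the 5 neighbouring frames, the per-pixel dot product over the 64 embedding
# channels with the reference frame's embedding, and packs the five 4x4 maps into one
# (B, 5, H, W) buffer (output stride 80 = 5 * 16). A minimal eager-mode equivalent based
# on the graph-fragment comments; helper and argument names are ours.
def _frame_correlations(emb, emb_ref):
    import torch
    # emb: (B, N, C, H, W), emb_ref: (B, C, H, W)
    corr = [(emb[:, i] * emb_ref).sum(dim=1, keepdim=True) for i in range(emb.size(1))]
    return torch.cat(corr, dim=1)  # (B, N, H, W)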
# kernel path: runs/run_shard_6/inductor_cache/fh/cfhd3pv6oq22djbxa5tx4y42vjmwgldrhgaichhekhdwb5ize252.py
# Topologically Sorted Source Nodes: [aligned_fea], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# aligned_fea => mul_5
# Graph fragment:
# %mul_5 : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %view_2), kwargs = {})
triton_poi_fused_mul_4 = async_compile.triton('triton_poi_fused_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 16
x1 = (xindex // 16) % 320
x2 = (xindex // 5120)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (16*(x1 // 64)) + (80*x2)), None)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, None)
''', device_str='cuda')
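# NOTE (sketch): the kernel above rescales every feature value by the sigmoid of its
# frame's correlation map; the index x1 // 64 picks which of the 5 frames a channel
# belongs to, so each frame's 64 channels share one (H, W) attention map. A minimal
# eager equivalent (names are ours):
def _apply_temporal_attention(aligned_fea, corr):
    import torch
    # aligned_fea: (B, N, C, H, W), corr: (B, N, H, W)
    prob = torch.sigmoid(corr).unsqueeze(2)        # (B, N, 1, H, W)
    out = aligned_fea * prob                       # broadcast over channels
    return out.reshape(out.size(0), -1, out.size(3), out.size(4))  # (B, N*C, H, W)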
# kernel path: runs/run_shard_6/inductor_cache/zr/czrki3u23zsgaiiexnna7jtzoedroempx47tvzii26xeuavvtgad.py
# Topologically Sorted Source Nodes: [conv2d_3, att], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# att => gt_1, mul_7, where_1
# conv2d_3 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_5, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.1), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_3, %mul_7), kwargs = {})
triton_poi_fused_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, None)
''', device_str='cuda')
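# NOTE (sketch): this kernel fuses the conv bias add with LeakyReLU(negative_slope=0.1),
# the epilogue pattern reused throughout this module. Eager-mode equivalent, assuming
# `out` is the extern conv result before bias (helper name is ours):
def _bias_leaky_relu(out, bias, slope=0.1):
    import torch.nn.functional as F
    return F.leaky_relu(out + bias.view(1, -1, 1, 1), negative_slope=slope)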
# kernel path: runs/run_shard_6/inductor_cache/pk/cpk3fygaseyui7qdpy3xpxqkvxk3hgw7slvnar3gxlbr74q67zf5.py
# Topologically Sorted Source Nodes: [att_max, att_avg], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
# Source node to ATen node mapping:
# att_avg => avg_pool2d
# att_max => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_1, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [3, 3], [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x5 = (xindex // 2)
x3 = (xindex // 256)
x6 = xindex % 256
x7 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x5)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x5)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x5)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x5)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (8*x5)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x5)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x5)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x5)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x5)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + ((-5) + (2*x0) + (8*x5)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + ((-4) + (2*x0) + (8*x5)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + ((-3) + (2*x0) + (8*x5)), tmp23 & xmask, eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + ((-1) + (2*x0) + (8*x5)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + ((2*x0) + (8*x5)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x5)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (3 + (2*x0) + (8*x5)), tmp43 & xmask, eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x5)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x5)), tmp49 & xmask, eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
tmp94 = 1 + ((-2)*x0) + ((-2)*x1) + (((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-2)*x0*((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))) + ((-2)*x1*((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5)))) + (4*x0*x1) + ((5) * ((5) <= (2 + (2*x0))) + (2 + (2*x0)) * ((2 + (2*x0)) < (5))) + ((5) * ((5) <= (2 + (2*x1))) + (2 + (2*x1)) * ((2 + (2*x1)) < (5)))
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + (512*x3)), tmp51, xmask)
tl.store(out_ptr1 + (x7), tmp76, xmask)
tl.store(out_ptr2 + (x6 + (512*x3)), tmp95, xmask)
''', device_str='cuda')
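# NOTE (sketch, not part of the generated wrapper): kernel 6 evaluates both pooling
# branches of the spatial attention pyramid in one pass: a 3x3 / stride-2 / pad-1 max
# pool (with its window-offset indices kept for the backward) and the matching average
# pool, laid out so the two results can be concatenated along channels. A minimal eager
# equivalent of the same step (names are ours):
def _pool_branches(att):
    import torch
    import torch.nn.functional as F
    att_max = F.max_pool2d(att, kernel_size=3, stride=2, padding=1)
    att_avg = F.avg_pool2d(att, kernel_size=3, stride=2, padding=1)
    return torch.cat([att_max, att_avg], dim=1)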
# kernel path: runs/run_shard_6/inductor_cache/ku/ckudte3yk3k5yua5cn4wgvpz6fzasnpftrdhrse4k2pj5bssbxy3.py
# Topologically Sorted Source Nodes: [conv2d_4, att_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# att_1 => gt_2, mul_8, where_2
# conv2d_4 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
# %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_4, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sg/csgczegfowupuwjczr4ywjw7hwfokovqexu34tu4cf2odny25h7r.py
# Topologically Sorted Source Nodes: [att_max_1, att_avg_1], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
# Source node to ATen node mapping:
# att_avg_1 => avg_pool2d_1
# att_max_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_3, [3, 3], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_3, [3, 3], [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 18, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = (xindex // 64)
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + ((-3) + (4*x2)), tmp6 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + ((-2) + (4*x2)), tmp11 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + ((-1) + (4*x2)), tmp18 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + ((-1) + (4*x2)), tmp21 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + (4*x2), tmp24 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + (4*x2)), tmp27 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + (4*x2)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + (4*x2)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + (4*x2)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tmp64 = tl.load(in_ptr0 + ((-3) + (4*x2)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tl.load(in_ptr0 + ((-2) + (4*x2)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp66 = tmp65 + tmp64
tmp67 = tl.load(in_ptr0 + ((-1) + (4*x2)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp68 = tmp67 + tmp66
tmp69 = tl.load(in_ptr0 + ((-1) + (4*x2)), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
tmp70 = tmp69 + tmp68
tmp71 = tl.load(in_ptr0 + (4*x2), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp72 = tmp71 + tmp70
tmp73 = tl.load(in_ptr0 + (1 + (4*x2)), tmp27 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tmp73 + tmp72
tmp75 = tl.load(in_ptr0 + (1 + (4*x2)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp76 = tmp75 + tmp74
tmp77 = tl.load(in_ptr0 + (2 + (4*x2)), tmp33 & xmask, eviction_policy='evict_last', other=0.0)
tmp78 = tmp77 + tmp76
tmp79 = tl.load(in_ptr0 + (3 + (4*x2)), tmp36 & xmask, eviction_policy='evict_last', other=0.0)
tmp80 = tmp79 + tmp78
tmp81 = tl.full([1], 9, tl.int32)
tmp82 = tmp80 / tmp81
tl.store(out_ptr0 + (x0 + (128*x1)), tmp38, xmask)
tl.store(out_ptr1 + (x2), tmp63, xmask)
tl.store(out_ptr2 + (x0 + (128*x1)), tmp82, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wq/cwqagsj5ls25hrcw6hxsayqci33xusemulwfozklrduzjqzpvbdb.py
# Topologically Sorted Source Nodes: [conv2d_6, att_L_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# att_L_1 => gt_4, mul_10, where_4
# conv2d_6 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_6, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/l4/cl4vcjd4z7lumtxc5zcpws7ce6eexgpl5gregarvkbnjzwfii7gk.py
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# att_L_3 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_10 = async_compile.triton('triton_poi_fused__to_copy_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
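# NOTE (sketch): kernels 10-12 precompute the integer source indices and fractional
# weights of a x2 bilinear upsampling with align_corners=False; output coordinate i maps
# to max((i + 0.5) * 0.5 - 0.5, 0) on the 2-pixel input grid, and this kernel takes the
# floor of that value. A small plain-Python sketch of the index computation (ours):
def _bilinear_source_index(i, scale=0.5):
    src = max((i + 0.5) * scale - 0.5, 0.0)
    return int(src)  # floor, matching the int32 cast in the kernel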
# kernel path: runs/run_shard_6/inductor_cache/vi/cvixasqvjpzhra4mkzvqpwqtena4rblcmdqim6ofp3nmxkli5cho.py
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# att_L_3 => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 0), kwargs = {})
triton_poi_fused_add_clamp_11 = async_compile.triton('triton_poi_fused_add_clamp_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/dw/cdwvjjvjx5yjaylq4q7psjgmnhvskuynevkz7t3bpyhxzjigsatv.py
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# att_L_3 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_12, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
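# NOTE (sketch): this kernel produces the clamped fractional part used as the
# interpolation weight; together with kernels 10 and 11 it matches the standard
# decomposition of a x2 bilinear upsample with align_corners=False applied to the 2x2
# attention map (the weighted blend itself is fused into the kernel below). A hedged
# eager-mode sketch of the step these tables implement (helper name is ours):
def _upsample_attention(att_L):
    import torch.nn.functional as F
    return F.interpolate(att_L, scale_factor=2, mode='bilinear', align_corners=False)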
# kernel path: runs/run_shard_6/inductor_cache/g7/cg7hwas3ulxmdn6h36bo5ewdukiqxglk6whwhnhy37eovq7koydc.py
# Topologically Sorted Source Nodes: [conv2d_7, att_L_2, att_L_3, conv2d_8, att_2, att_3], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# att_2 => gt_6, mul_17, where_6
# att_3 => add_7
# att_L_2 => gt_5, mul_11, where_5
# att_L_3 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_14, mul_15, mul_16, sub_3, sub_4, sub_6
# conv2d_7 => convolution_7
# conv2d_8 => convolution_8
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 0.1), kwargs = {})
# %where_5 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_7, %mul_11), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_5, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_5, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_5, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_5, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_14), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_15), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_16), kwargs = {})
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.1), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_8, %mul_17), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %add_6), kwargs = {})
# %gt_11 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_6, 0), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*i64', 10: '*fp32', 11: '*i1', 12: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x5 = (xindex // 4)
x2 = (xindex // 4) % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x6), xmask)
tmp26 = tl.load(in_ptr7 + (x2), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tmp16 - tmp16
tmp23 = tmp21 * tmp22
tmp24 = tmp16 + tmp23
tmp27 = tmp25 + tmp26
tmp28 = tmp27 > tmp12
tmp29 = tmp27 * tmp14
tmp30 = tl.where(tmp28, tmp27, tmp29)
tmp32 = tmp31 + tmp1
tmp33 = tmp31 < 0
tmp34 = tl.where(tmp33, tmp32, tmp31)
tmp35 = tmp24 - tmp24
tmp37 = tmp35 * tmp36
tmp38 = tmp24 + tmp37
tmp39 = tmp30 + tmp38
tmp40 = tmp30 > tmp12
tl.store(in_out_ptr0 + (x6), tmp39, xmask)
tl.store(out_ptr0 + (x6), tmp40, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qh/cqhre475mhrzai26fnzznz5at2t325ucwdj2hqvrn3rxtfvbapzo.py
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# att_5 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_14 = async_compile.triton('triton_poi_fused__to_copy_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
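# [Illustrative note] The kernel above yields the floor source index, int(clamp((i + 0.5) * 0.5 - 0.5, min=0)),
# for the second (2x2 -> 4x4) bilinear upsample of the attention map; the add/clamp kernel that follows
# produces the companion index + 1, capped at the last valid source row/column.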
# kernel path: runs/run_shard_6/inductor_cache/5f/c5fjkguhvjg5ryun7wopg6renfax5rp23vfbg6nzsu7akebanlci.py
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# att_5 => add_9, clamp_max_4
# Graph fragment:
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_9, 1), kwargs = {})
triton_poi_fused_add_clamp_15 = async_compile.triton('triton_poi_fused_add_clamp_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ae/caebye2u374vhzlpesqh72pu5msuyvgx2qnngs7zftzquvm3h3mg.py
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# att_5 => add_8, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_19, sub_7, sub_9
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 0.5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_19, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5d/c5digbxcc3yvdlkmff5azrziozddchj6yb3sq5xkpjinfocpzrk4.py
# Topologically Sorted Source Nodes: [conv2d_9, att_4, att_5], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# att_4 => gt_7, mul_18, where_7
# att_5 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_12, add_13, add_14, mul_21, mul_22, mul_23, sub_10, sub_11, sub_13
# conv2d_9 => convolution_9
# Graph fragment:
# %convolution_9 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_7, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_9, 0), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_9, 0.1), kwargs = {})
# %where_7 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %convolution_9, %mul_18), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_7, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_7, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_7, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_7, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_21), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_22), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_23), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*fp32', 4: '*fp32', 5: '*i64', 6: '*fp32', 7: '*i64', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
x0 = xindex % 4
x6 = (xindex // 16)
x2 = (xindex // 16) % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + (2*tmp33) + (4*x6)), None, eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + (2*tmp33) + (4*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tl.store(in_out_ptr0 + (x4), tmp50, None)
''', device_str='cuda')
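# [Illustrative sketch, hypothetical names] The kernel above fuses the bias add and LeakyReLU(0.1)
# on the conv2d_9 output with the corner gather and lerps of the 2x bilinear upsample
# (att_4 -> att_5 in the source module), i.e. roughly:
def _ref_att_upsample_sketch(conv9_plus_bias):
    att = torch.nn.functional.leaky_relu(conv9_plus_bias, negative_slope=0.1)
    return torch.nn.functional.interpolate(att, scale_factor=2, mode='bilinear', align_corners=False)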
# kernel path: runs/run_shard_6/inductor_cache/cu/ccuvxkf5qhj2jvrsbb3ffhmjd2jtqb6hmsyda6rq3u6bfora32rr.py
# Topologically Sorted Source Nodes: [conv2d_2, fea, att_add, att_7, mul_6, mul_7, fea_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# att_7 => sigmoid_1
# att_add => convolution_12
# conv2d_2 => convolution_2
# fea => gt, mul_6, where
# fea_1 => add_15
# mul_6 => mul_25
# mul_7 => mul_26
# Graph fragment:
# %convolution_2 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_5, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_2, %mul_6), kwargs = {})
# %convolution_12 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_8, %primals_26, %primals_27, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_10,), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %sigmoid_1), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_25, 2), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %convolution_12), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x3), None)
tmp13 = tl.load(in_out_ptr1 + (x3), None)
tmp14 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp7 * tmp9
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 + tmp15
tl.store(in_out_ptr0 + (x3), tmp2, None)
tl.store(in_out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
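# [Illustrative sketch, hypothetical names] The kernel above stores the biased conv2d_2 output back
# into its buffer and fuses the module's final gating; with fea the LeakyReLU-activated fusion
# feature, att the pre-sigmoid attention map and att_add the additive branch, it computes:
def _ref_final_gate_sketch(fea, att, att_add):
    return fea * torch.sigmoid(att) * 2 + att_add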
# kernel path: runs/run_shard_6/inductor_cache/74/c744ryizhtwhrucrt6eo7euxmid6gpfdi3fhwvvcyslcqrxawzy3.py
# Topologically Sorted Source Nodes: [conv2d_9, att_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# att_4 => gt_7, mul_18, where_7
# conv2d_9 => convolution_9
# Graph fragment:
# %convolution_9 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_7, %primals_20, %primals_21, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_9, 0), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_9, 0.1), kwargs = {})
# %where_7 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %convolution_9, %mul_18), kwargs = {})
# %gt_10 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_7, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
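# [Note] This kernel (and kernel 20 below) does not contribute to the forward output: it recomputes
# leaky_relu(conv + bias) only to store the boolean mask (value > 0) that the LeakyReLU backward
# pass uses to choose between the 1.0 and 0.1 slopes, so the convolution need not be rerun later.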
# kernel path: runs/run_shard_6/inductor_cache/ey/ceyyqukhyyygq34vtv7g5xckv5mooqbd7qwq2qatahqa4c2so7gc.py
# Topologically Sorted Source Nodes: [conv2d_7, att_L_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# att_L_2 => gt_5, mul_11, where_5
# conv2d_7 => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 0.1), kwargs = {})
# %where_5 : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_7, %mul_11), kwargs = {})
# %gt_12 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_5, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27 = args
args.clear()
assert_size_stride(primals_1, (4, 5, 64, 4, 4), (5120, 1024, 16, 4, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 320, 1, 1), (320, 1, 1, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 320, 1, 1), (320, 1, 1, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (64, ), (1, ))
assert_size_stride(primals_12, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64, ), (1, ))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (64, ), (1, ))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64, ), (1, ))
assert_size_stride(primals_24, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_25, (64, ), (1, ))
assert_size_stride(primals_26, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_27, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [clone], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [emb_ref], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [emb_ref], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 4096, grid=grid(4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (20, 64, 4, 4), (1024, 16, 4, 1), 0), primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (20, 64, 4, 4), (1024, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf4, primals_5, 20480, grid=grid(20480), stream=stream0)
del primals_5
buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf10 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0) # alias
buf11 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 16) # alias
buf12 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 32) # alias
buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 48) # alias
buf14 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 64) # alias
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1, sum_2, mul_2, sum_3, mul_3, sum_4, mul_4, sum_5, cat], Original ATen: [aten.mul, aten.sum, aten.cat]
triton_per_fused_cat_mul_sum_3.run(buf4, buf2, buf10, buf11, buf12, buf13, buf14, 64, 64, grid=grid(64), stream=stream0)
buf16 = empty_strided_cuda((4, 320, 4, 4), (5120, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [aligned_fea], Original ATen: [aten.mul]
triton_poi_fused_mul_4.run(primals_1, buf15, buf16, 20480, grid=grid(20480), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 4, 4), (1024, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 64, 4, 4), (1024, 16, 4, 1))
buf20 = buf19; del buf19 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, att], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_5.run(buf20, primals_9, 4096, grid=grid(4096), stream=stream0)
del primals_9
buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 4, 2, 1), torch.float32)
buf21 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 0) # alias
buf22 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.int8)
buf23 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 256) # alias
# Topologically Sorted Source Nodes: [att_max, att_avg], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6.run(buf20, buf21, buf22, buf23, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 64, 2, 2), (256, 4, 2, 1))
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, att_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_7.run(buf26, primals_11, 1024, grid=grid(1024), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 64, 2, 2), (256, 4, 2, 1))
buf28 = buf27; del buf27 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, att_L], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_7.run(buf28, primals_13, 1024, grid=grid(1024), stream=stream0)
del primals_13
buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.float32)
buf29 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 0) # alias
buf30 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8)
buf31 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 64) # alias
# Topologically Sorted Source Nodes: [att_max_1, att_avg_1], Original ATen: [aten.max_pool2d_with_indices, aten.avg_pool2d]
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8.run(buf28, buf29, buf30, buf31, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 1, 1))
buf34 = buf33; del buf33 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, att_L_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_9.run(buf34, primals_15, 256, grid=grid(256), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 1, 1))
buf36 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_10.run(buf36, 2, grid=grid(2), stream=stream0)
buf37 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_11.run(buf37, 2, grid=grid(2), stream=stream0)
buf38 = empty_strided_cuda((2, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_10.run(buf38, 2, grid=grid(2), stream=stream0)
buf39 = empty_strided_cuda((2, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_11.run(buf39, 2, grid=grid(2), stream=stream0)
buf40 = empty_strided_cuda((2, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12.run(buf40, 2, grid=grid(2), stream=stream0)
buf42 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_L_3], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12.run(buf42, 2, grid=grid(2), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf26, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 64, 2, 2), (256, 4, 2, 1))
buf41 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.float32)
buf44 = buf41; del buf41 # reuse
buf62 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_7, att_L_2, att_L_3, conv2d_8, att_2, att_3], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add, aten.leaky_relu_backward]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13.run(buf44, buf36, buf38, buf35, primals_17, buf39, buf40, buf43, primals_19, buf37, buf42, buf62, 1024, grid=grid(1024), stream=stream0)
del buf43
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_20, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 64, 2, 2), (256, 4, 2, 1))
buf46 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_14.run(buf46, 4, grid=grid(4), stream=stream0)
buf47 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_15.run(buf47, 4, grid=grid(4), stream=stream0)
buf48 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_14.run(buf48, 4, grid=grid(4), stream=stream0)
buf49 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_15.run(buf49, 4, grid=grid(4), stream=stream0)
buf50 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16.run(buf50, 4, grid=grid(4), stream=stream0)
buf52 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [att_5], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16.run(buf52, 4, grid=grid(4), stream=stream0)
buf53 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
buf54 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, att_4, att_5], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17.run(buf54, buf46, buf48, buf45, primals_21, buf49, buf50, buf47, buf52, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [att_6], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 64, 4, 4), (1024, 16, 4, 1))
buf56 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [att_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf56, primals_23, 4096, grid=grid(4096), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_24, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 64, 4, 4), (1024, 16, 4, 1))
buf58 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, leaky_relu_8], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_5.run(buf58, primals_25, 4096, grid=grid(4096), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [att_add], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 64, 4, 4), (1024, 16, 4, 1))
buf18 = buf17; del buf17 # reuse
buf60 = buf59; del buf59 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, fea, att_add, att_7, mul_6, mul_7, fea_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18.run(buf18, buf60, primals_7, buf56, primals_27, 4096, grid=grid(4096), stream=stream0)
del primals_27
del primals_7
buf61 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_9, att_4], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19.run(buf45, primals_21, buf61, 1024, grid=grid(1024), stream=stream0)
del buf45
del primals_21
buf63 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_7, att_L_2], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20.run(buf35, primals_17, buf63, 256, grid=grid(256), stream=stream0)
del buf35
del primals_17
return (buf60, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, buf0, buf2, reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 0), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 1024), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 2048), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 3072), reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 4096), buf15, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30, buf32, buf34, buf36, buf37, buf38, buf39, buf40, buf42, buf44, buf46, buf47, buf48, buf49, buf50, buf52, buf54, buf56, buf58, buf61, buf62, buf63, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 5, 64, 4, 4), (5120, 1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 320, 1, 1), (320, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 320, 1, 1), (320, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn.functional as F
import torch.nn as nn
class TSA_Fusion(nn.Module):
""" Temporal Spatial Attention fusion module
Temporal: correlation;
Spatial: 3 pyramid levels.
"""
def __init__(self, nf=64, nframes=5, center=2):
super(TSA_Fusion, self).__init__()
self.center = center
self.tAtt_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.tAtt_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_fusion = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.sAtt_1 = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.avgpool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, aligned_fea):
B, N, C, H, W = aligned_fea.size()
emb_ref = self.tAtt_2(aligned_fea[:, self.center, :, :, :].clone())
emb = self.tAtt_1(aligned_fea.view(-1, C, H, W)).view(B, N, -1, H, W)
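        # temporal attention: correlate each frame's embedding with the embedded center frame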
cor_l = []
for i in range(N):
emb_nbr = emb[:, i, :, :, :]
cor_tmp = torch.sum(emb_nbr * emb_ref, 1).unsqueeze(1)
cor_l.append(cor_tmp)
cor_prob = torch.sigmoid(torch.cat(cor_l, dim=1))
        cor_prob = cor_prob.unsqueeze(2).repeat(1, 1, C, 1, 1).view(B, -1, H, W)
aligned_fea = aligned_fea.view(B, -1, H, W) * cor_prob
fea = self.lrelu(self.fea_fusion(aligned_fea))
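        # spatial attention: 3-level pooling pyramid, refined and upsampled back, then sigmoid gating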
att = self.lrelu(self.sAtt_1(aligned_fea))
att_max = self.maxpool(att)
att_avg = self.avgpool(att)
att = self.lrelu(self.sAtt_2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L1(att))
att_max = self.maxpool(att_L)
att_avg = self.avgpool(att_L)
att_L = self.lrelu(self.sAtt_L2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L3(att_L))
att_L = F.interpolate(att_L, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.lrelu(self.sAtt_3(att))
att = att + att_L
att = self.lrelu(self.sAtt_4(att))
att = F.interpolate(att, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.sAtt_5(att)
att_add = self.sAtt_add_2(self.lrelu(self.sAtt_add_1(att)))
att = torch.sigmoid(att)
fea = fea * att * 2 + att_add
return fea
def get_inputs():
return [torch.rand([4, 5, 64, 4, 4])]
def get_init_inputs():
return [[], {}]
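# Minimal usage sketch (illustrative; mirrors get_inputs()/get_init_inputs() above):
if __name__ == "__main__":
    fusion = TSA_Fusion(nf=64, nframes=5, center=2)
    out = fusion(torch.rand(4, 5, 64, 4, 4))  # (B, N, C, H, W) -> (B, nf, H, W)
    print(out.shape)  # expected: torch.Size([4, 64, 4, 4])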
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 1024
x1 = xindex // 1024
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2048 + x0 + 5120 * x1), None)
tl.store(out_ptr0 + x2, tmp0, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_per_fused_cat_mul_sum_3(in_ptr0, in_ptr1, out_ptr5, out_ptr6,
out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 5120 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * r2 + 1024 * x1), xmask, other=0.0)
tmp7 = tl.load(in_ptr0 + (1024 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp13 = tl.load(in_ptr0 + (2048 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp19 = tl.load(in_ptr0 + (3072 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp25 = tl.load(in_ptr0 + (4096 + x0 + 16 * r2 + 5120 * x1), xmask,
other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp8 = tmp7 * tmp1
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tmp14 = tmp13 * tmp1
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp20 = tmp19 * tmp1
tmp21 = tl.broadcast_to(tmp20, [XBLOCK, RBLOCK])
tmp23 = tl.where(xmask, tmp21, 0)
tmp24 = tl.sum(tmp23, 1)[:, None]
tmp26 = tmp25 * tmp1
tmp27 = tl.broadcast_to(tmp26, [XBLOCK, RBLOCK])
tmp29 = tl.where(xmask, tmp27, 0)
tmp30 = tl.sum(tmp29, 1)[:, None]
tl.store(out_ptr5 + (x0 + 80 * x1), tmp6, xmask)
tl.store(out_ptr6 + (x0 + 80 * x1), tmp12, xmask)
tl.store(out_ptr7 + (x0 + 80 * x1), tmp18, xmask)
tl.store(out_ptr8 + (x0 + 80 * x1), tmp24, xmask)
tl.store(out_ptr9 + (x0 + 80 * x1), tmp30, xmask)
# Weights every feature channel of the aligned features by the sigmoid of its frame's
# correlation map (the temporal-attention weighting).
@triton.jit
def triton_poi_fused_mul_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 320
x2 = xindex // 5120
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * (x1 // 64) + 80 * x2), None)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, None)
# 3x3, stride-2, padding-1 max-pool (values plus argmax indices) and average-pool computed in
# one pass; the two results fill the two halves of the channel-concatenated output buffer.
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x5 = xindex // 2
x3 = xindex // 256
x6 = xindex % 256
x7 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x5), tmp10 & xmask,
eviction_policy='evict_last', other=0.0)
tmp78 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x5), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp79 = tmp78 + tmp77
tmp80 = tl.load(in_ptr0 + (-3 + 2 * x0 + 8 * x5), tmp23 & xmask,
eviction_policy='evict_last', other=0.0)
tmp81 = tmp80 + tmp79
tmp82 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x5), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp83 = tmp82 + tmp81
tmp84 = tl.load(in_ptr0 + (2 * x0 + 8 * x5), tmp33 & xmask,
eviction_policy='evict_last', other=0.0)
tmp85 = tmp84 + tmp83
tmp86 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x5), tmp36 & xmask,
eviction_policy='evict_last', other=0.0)
tmp87 = tmp86 + tmp85
tmp88 = tl.load(in_ptr0 + (3 + 2 * x0 + 8 * x5), tmp43 & xmask,
eviction_policy='evict_last', other=0.0)
tmp89 = tmp88 + tmp87
tmp90 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x5), tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp91 = tmp90 + tmp89
tmp92 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x5), tmp49 & xmask,
eviction_policy='evict_last', other=0.0)
tmp93 = tmp92 + tmp91
tmp94 = 1 + -2 * x0 + -2 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) *
(2 + 2 * x0 < 5)) * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x0 * (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 *
x1 < 5)) + -2 * x1 * (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 + 2 *
x0 < 5)) + 4 * x0 * x1 + (5 * (5 <= 2 + 2 * x0) + (2 + 2 * x0) * (2 +
2 * x0 < 5)) + (5 * (5 <= 2 + 2 * x1) + (2 + 2 * x1) * (2 + 2 * x1 < 5)
)
tmp95 = tmp93 / tmp94
tl.store(out_ptr0 + (x6 + 512 * x3), tmp51, xmask)
tl.store(out_ptr1 + x7, tmp76, xmask)
tl.store(out_ptr2 + (x6 + 512 * x3), tmp95, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
x1 = xindex // 64
tmp0 = tl.full([1], -1, tl.int64)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = tmp5 & tmp5
tmp7 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp8 = tmp1 >= tmp1
tmp9 = tmp1 < tmp3
tmp10 = tmp8 & tmp9
tmp11 = tmp5 & tmp10
tmp12 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp13 = triton_helpers.maximum(tmp12, tmp7)
tmp14 = tl.full([1], 1, tl.int64)
tmp15 = tmp14 >= tmp1
tmp16 = tmp14 < tmp3
tmp17 = tmp15 & tmp16
tmp18 = tmp5 & tmp17
tmp19 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp20 = triton_helpers.maximum(tmp19, tmp13)
tmp21 = tmp10 & tmp5
tmp22 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy
='evict_last', other=float('-inf'))
tmp23 = triton_helpers.maximum(tmp22, tmp20)
tmp24 = tmp10 & tmp10
tmp25 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp26 = triton_helpers.maximum(tmp25, tmp23)
tmp27 = tmp10 & tmp17
tmp28 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp29 = triton_helpers.maximum(tmp28, tmp26)
tmp30 = tmp17 & tmp5
tmp31 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp29)
tmp33 = tmp17 & tmp10
tmp34 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp17 & tmp17
tmp37 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy=
'evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = tmp12 > tmp7
tmp40 = tl.full([1], 1, tl.int8)
tmp41 = tl.full([1], 0, tl.int8)
tmp42 = tl.where(tmp39, tmp40, tmp41)
tmp43 = tmp19 > tmp13
tmp44 = tl.full([1], 2, tl.int8)
tmp45 = tl.where(tmp43, tmp44, tmp42)
tmp46 = tmp22 > tmp20
tmp47 = tl.full([1], 3, tl.int8)
tmp48 = tl.where(tmp46, tmp47, tmp45)
tmp49 = tmp25 > tmp23
tmp50 = tl.full([1], 4, tl.int8)
tmp51 = tl.where(tmp49, tmp50, tmp48)
tmp52 = tmp28 > tmp26
tmp53 = tl.full([1], 5, tl.int8)
tmp54 = tl.where(tmp52, tmp53, tmp51)
tmp55 = tmp31 > tmp29
tmp56 = tl.full([1], 6, tl.int8)
tmp57 = tl.where(tmp55, tmp56, tmp54)
tmp58 = tmp34 > tmp32
tmp59 = tl.full([1], 7, tl.int8)
tmp60 = tl.where(tmp58, tmp59, tmp57)
tmp61 = tmp37 > tmp35
tmp62 = tl.full([1], 8, tl.int8)
tmp63 = tl.where(tmp61, tmp62, tmp60)
tmp64 = tl.load(in_ptr0 + (-3 + 4 * x2), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp65 = tl.load(in_ptr0 + (-2 + 4 * x2), tmp11 & xmask, eviction_policy
='evict_last', other=0.0)
tmp66 = tmp65 + tmp64
tmp67 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp18 & xmask, eviction_policy
='evict_last', other=0.0)
tmp68 = tmp67 + tmp66
tmp69 = tl.load(in_ptr0 + (-1 + 4 * x2), tmp21 & xmask, eviction_policy
='evict_last', other=0.0)
tmp70 = tmp69 + tmp68
tmp71 = tl.load(in_ptr0 + 4 * x2, tmp24 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp72 = tmp71 + tmp70
tmp73 = tl.load(in_ptr0 + (1 + 4 * x2), tmp27 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp74 = tmp73 + tmp72
tmp75 = tl.load(in_ptr0 + (1 + 4 * x2), tmp30 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp76 = tmp75 + tmp74
tmp77 = tl.load(in_ptr0 + (2 + 4 * x2), tmp33 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp78 = tmp77 + tmp76
tmp79 = tl.load(in_ptr0 + (3 + 4 * x2), tmp36 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp80 = tmp79 + tmp78
tmp81 = tl.full([1], 9, tl.int32)
tmp82 = tmp80 / tmp81
tl.store(out_ptr0 + (x0 + 128 * x1), tmp38, xmask)
tl.store(out_ptr1 + x2, tmp63, xmask)
tl.store(out_ptr2 + (x0 + 128 * x1), tmp82, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__to_copy_10(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_11(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
# Bilinearly upsamples the leaky-ReLU'd 1x1 pyramid branch to 2x2 and adds it to the
# leaky-ReLU'd 2x2 branch; also emits a leaky-ReLU backward mask for the 2x2 branch.
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x5 = xindex // 4
x2 = xindex // 4 % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x6, xmask)
tmp26 = tl.load(in_ptr7 + x2, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 1, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tl.where(tmp7, tmp6, tmp5)
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tl.where(tmp19, tmp18, tmp17)
tmp21 = tmp16 - tmp16
tmp23 = tmp21 * tmp22
tmp24 = tmp16 + tmp23
tmp27 = tmp25 + tmp26
tmp28 = tmp27 > tmp12
tmp29 = tmp27 * tmp14
tmp30 = tl.where(tmp28, tmp27, tmp29)
tmp32 = tmp31 + tmp1
tmp33 = tmp31 < 0
tl.where(tmp33, tmp32, tmp31)
tmp35 = tmp24 - tmp24
tmp37 = tmp35 * tmp36
tmp38 = tmp24 + tmp37
tmp39 = tmp30 + tmp38
tmp40 = tmp30 > tmp12
tl.store(in_out_ptr0 + x6, tmp39, xmask)
tl.store(out_ptr0 + x6, tmp40, xmask)
@triton.jit
def triton_poi_fused__to_copy_14(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_15(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
# Bilinear 2x upsampling (2x2 -> 4x4) of the leaky-ReLU'd convolution output, using the
# precomputed corner indices and interpolation weights.
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x6 = xindex // 16
x2 = xindex // 16 % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.1
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + 2 * tmp33 + 4 * x6), None,
eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + 2 * tmp33 + 4 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tl.store(in_out_ptr0 + x4, tmp50, None)
# Final fusion: writes back the biased fea_fusion output and computes
# leaky_relu(fea) * sigmoid(att) * 2 + att_add.
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x3, None)
tmp13 = tl.load(in_out_ptr1 + x3, None)
tmp14 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp7 * tmp9
tmp11 = 2.0
tmp12 = tmp10 * tmp11
tmp15 = tmp13 + tmp14
tmp16 = tmp12 + tmp15
tl.store(in_out_ptr0 + x3, tmp2, None)
tl.store(in_out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27) = args
args.clear()
assert_size_stride(primals_1, (4, 5, 64, 4, 4), (5120, 1024, 16, 4, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 320, 1, 1), (320, 1, 1, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 320, 1, 1), (320, 1, 1, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (64,), (1,))
assert_size_stride(primals_16, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_21, (64,), (1,))
assert_size_stride(primals_22, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_23, (64,), (1,))
assert_size_stride(primals_24, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_25, (64,), (1,))
assert_size_stride(primals_26, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_27, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4096)](primals_1, buf0, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(4096)](buf2, primals_3, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_1, (20,
64, 4, 4), (1024, 16, 4, 1), 0), primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (20, 64, 4, 4), (1024, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_2[grid(20480)](buf4, primals_5, 20480,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf15 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
buf10 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 0)
buf11 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 16)
buf12 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 32)
buf13 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 48)
buf14 = reinterpret_tensor(buf15, (4, 1, 4, 4), (80, 16, 4, 1), 64)
triton_per_fused_cat_mul_sum_3[grid(64)](buf4, buf2, buf10, buf11,
buf12, buf13, buf14, 64, 64, XBLOCK=32, num_warps=8, num_stages=1)
buf16 = empty_strided_cuda((4, 320, 4, 4), (5120, 16, 4, 1), torch.
float32)
triton_poi_fused_mul_4[grid(20480)](primals_1, buf15, buf16, 20480,
XBLOCK=256, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 64, 4, 4), (1024, 16, 4, 1))
buf19 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 64, 4, 4), (1024, 16, 4, 1))
buf20 = buf19
del buf19
triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf20,
primals_9, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf24 = empty_strided_cuda((4, 128, 2, 2), (512, 4, 2, 1), torch.
float32)
buf21 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 0)
buf22 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.int8)
buf23 = reinterpret_tensor(buf24, (4, 64, 2, 2), (512, 4, 2, 1), 256)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_6[grid(1024)](buf20
, buf21, buf22, buf23, 1024, XBLOCK=128, num_warps=4, num_stages=1)
buf25 = extern_kernels.convolution(buf24, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 64, 2, 2), (256, 4, 2, 1))
buf26 = buf25
del buf25
triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf26,
primals_11, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 64, 2, 2), (256, 4, 2, 1))
buf28 = buf27
del buf27
triton_poi_fused_convolution_leaky_relu_7[grid(1024)](buf28,
primals_13, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf32 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf29 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 0)
buf30 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.int8)
buf31 = reinterpret_tensor(buf32, (4, 64, 1, 1), (128, 1, 1, 1), 64)
triton_poi_fused_avg_pool2d_max_pool2d_with_indices_8[grid(256)](buf28,
buf29, buf30, buf31, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 64, 1, 1), (64, 1, 1, 1))
buf34 = buf33
del buf33
triton_poi_fused_convolution_leaky_relu_9[grid(256)](buf34,
primals_15, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_15
buf35 = extern_kernels.convolution(buf34, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 64, 1, 1), (64, 1, 1, 1))
buf36 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_10[grid(2)](buf36, 2, XBLOCK=2, num_warps
=1, num_stages=1)
buf37 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_11[grid(2)](buf37, 2, XBLOCK=2,
num_warps=1, num_stages=1)
buf38 = empty_strided_cuda((2,), (1,), torch.int64)
triton_poi_fused__to_copy_10[grid(2)](buf38, 2, XBLOCK=2, num_warps
=1, num_stages=1)
buf39 = empty_strided_cuda((2,), (1,), torch.int64)
triton_poi_fused_add_clamp_11[grid(2)](buf39, 2, XBLOCK=2,
num_warps=1, num_stages=1)
buf40 = empty_strided_cuda((2,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf40,
2, XBLOCK=2, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_12[grid(2)](buf42,
2, XBLOCK=2, num_warps=1, num_stages=1)
buf43 = extern_kernels.convolution(buf26, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 64, 2, 2), (256, 4, 2, 1))
buf41 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.float32
)
buf44 = buf41
del buf41
buf62 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool)
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_leaky_relu_backward_mul_sub_13[
grid(1024)](buf44, buf36, buf38, buf35, primals_17, buf39,
buf40, buf43, primals_19, buf37, buf42, buf62, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
del buf43
del primals_19
buf45 = extern_kernels.convolution(buf44, primals_20, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 64, 2, 2), (256, 4, 2, 1))
buf46 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_14[grid(4)](buf46, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf47 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_15[grid(4)](buf47, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_14[grid(4)](buf48, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf49 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_add_clamp_15[grid(4)](buf49, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf50 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf50,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf52 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_16[grid(4)](buf52,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf53 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
buf54 = buf53
del buf53
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_17[
grid(4096)](buf54, buf46, buf48, buf45, primals_21, buf49,
buf50, buf47, buf52, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf55 = extern_kernels.convolution(buf54, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 64, 4, 4), (1024, 16, 4, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_1[grid(4096)](buf56, primals_23, 4096,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf57 = extern_kernels.convolution(buf56, primals_24, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 64, 4, 4), (1024, 16, 4, 1))
buf58 = buf57
del buf57
triton_poi_fused_convolution_leaky_relu_5[grid(4096)](buf58,
primals_25, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf59 = extern_kernels.convolution(buf58, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 64, 4, 4), (1024, 16, 4, 1))
buf18 = buf17
del buf17
buf60 = buf59
del buf59
triton_poi_fused_add_convolution_leaky_relu_mul_sigmoid_18[grid(4096)](
buf18, buf60, primals_7, buf56, primals_27, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_27
del primals_7
buf61 = empty_strided_cuda((4, 64, 2, 2), (256, 4, 2, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_19[grid
(1024)](buf45, primals_21, buf61, 1024, XBLOCK=256, num_warps=4,
num_stages=1)
del buf45
del primals_21
buf63 = empty_strided_cuda((4, 64, 1, 1), (64, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_20[grid
(256)](buf35, primals_17, buf63, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf35
del primals_17
return (buf60, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, buf0, buf2,
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 0),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 1024),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 2048),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 3072),
reinterpret_tensor(buf4, (4, 64, 4, 4), (5120, 16, 4, 1), 4096),
buf15, buf16, buf18, buf20, buf22, buf24, buf26, buf28, buf30,
buf32, buf34, buf36, buf37, buf38, buf39, buf40, buf42, buf44,
buf46, buf47, buf48, buf49, buf50, buf52, buf54, buf56, buf58,
buf61, buf62, buf63)
class TSA_FusionNew(nn.Module):
""" Temporal Spatial Attention fusion module
Temporal: correlation;
Spatial: 3 pyramid levels.
"""
def __init__(self, nf=64, nframes=5, center=2):
super(TSA_FusionNew, self).__init__()
self.center = center
self.tAtt_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.tAtt_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_fusion = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.sAtt_1 = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.avgpool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, input_0):
primals_2 = self.tAtt_1.weight
primals_3 = self.tAtt_1.bias
primals_4 = self.tAtt_2.weight
primals_5 = self.tAtt_2.bias
primals_6 = self.fea_fusion.weight
primals_7 = self.fea_fusion.bias
primals_8 = self.sAtt_1.weight
primals_9 = self.sAtt_1.bias
primals_10 = self.sAtt_2.weight
primals_11 = self.sAtt_2.bias
primals_16 = self.sAtt_3.weight
primals_13 = self.sAtt_3.bias
primals_12 = self.sAtt_4.weight
primals_15 = self.sAtt_4.bias
primals_18 = self.sAtt_5.weight
primals_17 = self.sAtt_5.bias
primals_20 = self.sAtt_L1.weight
primals_19 = self.sAtt_L1.bias
primals_14 = self.sAtt_L2.weight
primals_21 = self.sAtt_L2.bias
primals_22 = self.sAtt_L3.weight
primals_23 = self.sAtt_L3.bias
primals_24 = self.sAtt_add_1.weight
primals_25 = self.sAtt_add_1.bias
primals_26 = self.sAtt_add_2.weight
primals_27 = self.sAtt_add_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27])
return output[0]
| creeper121386/EDVR-modified | TSA_Fusion | false | 1,816 | [
"Apache-2.0"
] | 0 | 3fa565b99811e8f84f6ea3793090614606382332 | https://github.com/creeper121386/EDVR-modified/tree/3fa565b99811e8f84f6ea3793090614606382332 | import torch
import torch.utils.data
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
""" Temporal Spatial Attention fusion module
Temporal: correlation;
Spatial: 3 pyramid levels.
"""
def __init__(self, nf=64, nframes=5, center=2):
super().__init__()
self.center = center
self.tAtt_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.tAtt_2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_fusion = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.sAtt_1 = nn.Conv2d(nframes * nf, nf, 1, 1, bias=True)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.avgpool = nn.AvgPool2d(3, stride=2, padding=1)
self.sAtt_2 = nn.Conv2d(nf * 2, nf, 1, 1, bias=True)
self.sAtt_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_4 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_5 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_L1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_L2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
self.sAtt_L3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.sAtt_add_1 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.sAtt_add_2 = nn.Conv2d(nf, nf, 1, 1, bias=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, aligned_fea):
B, N, C, H, W = aligned_fea.size()
emb_ref = self.tAtt_2(aligned_fea[:, self.center, :, :, :].clone())
emb = self.tAtt_1(aligned_fea.view(-1, C, H, W)).view(B, N, -1, H, W)
cor_l = []
for i in range(N):
emb_nbr = emb[:, i, :, :, :]
cor_tmp = torch.sum(emb_nbr * emb_ref, 1).unsqueeze(1)
cor_l.append(cor_tmp)
cor_prob = torch.sigmoid(torch.cat(cor_l, dim=1))
cor_prob = cor_prob.unsqueeze(2).repeat(1, 1, C, 1, 1).view(B, -1, H, W
)
aligned_fea = aligned_fea.view(B, -1, H, W) * cor_prob
fea = self.lrelu(self.fea_fusion(aligned_fea))
att = self.lrelu(self.sAtt_1(aligned_fea))
att_max = self.maxpool(att)
att_avg = self.avgpool(att)
att = self.lrelu(self.sAtt_2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L1(att))
att_max = self.maxpool(att_L)
att_avg = self.avgpool(att_L)
att_L = self.lrelu(self.sAtt_L2(torch.cat([att_max, att_avg], dim=1)))
att_L = self.lrelu(self.sAtt_L3(att_L))
att_L = F.interpolate(att_L, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.lrelu(self.sAtt_3(att))
att = att + att_L
att = self.lrelu(self.sAtt_4(att))
att = F.interpolate(att, scale_factor=2, mode='bilinear',
align_corners=False)
att = self.sAtt_5(att)
att_add = self.sAtt_add_2(self.lrelu(self.sAtt_add_1(att)))
att = torch.sigmoid(att)
fea = fea * att * 2 + att_add
return fea
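# Illustrative aside (not part of the original source): a minimal, self-contained sketch of
# the temporal-attention step above, using random stand-in tensors for the two embeddings.
# For each of the N frames the channel-wise dot product with the center-frame embedding is
# taken and squashed with a sigmoid to give per-pixel attention weights.
def _temporal_attention_sketch():
    B, N, C, H, W = 4, 5, 64, 4, 4
    emb = torch.rand(B, N, C, H, W)      # stand-in for the tAtt_1 embeddings of all frames
    emb_ref = torch.rand(B, C, H, W)     # stand-in for the tAtt_2 embedding of the center frame
    cor_l = [torch.sum(emb[:, i] * emb_ref, 1).unsqueeze(1) for i in range(N)]
    cor_prob = torch.sigmoid(torch.cat(cor_l, dim=1))  # (B, N, H, W) attention weights
    return cor_prob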
def get_inputs():
return [torch.rand([4, 5, 64, 4, 4])]
def get_init_inputs():
return []
|
ps_FNNDenoiser | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/z5/cz5bgdo2gmhnnmtf6w7lrjkvliacxo7nomq7mbmjquxqyxqgt5bj.py
# Topologically Sorted Source Nodes: [fnn_enc_output], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# fnn_enc_output => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
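# Illustrative aside (not part of the generated module): eager-mode equivalent of the fused
# kernel above: a bias-add followed by ReLU, plus the mask that the ReLU backward pass needs.
def _relu_threshold_backward_reference(x, bias):
    out = torch.relu(x + bias)   # fused bias-add + ReLU
    mask = out <= 0.0            # the `le` mask emitted for threshold_backward
    return out, mask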
# kernel path: runs/run_shard_6/inductor_cache/nt/cnt7ck7sh56p6fyp544fh6eduvfqtaz7sxosnvvueljzvaiycmfc.py
# Topologically Sorted Source Nodes: [fnn_dec_output, v_j_filt], Original ATen: [aten.relu, aten.mul, aten.threshold_backward]
# Source node to ATen node mapping:
# fnn_dec_output => relu_1
# v_j_filt => mul
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_1, %primals_3), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_mul_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_mul_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp8, xmask)
''', device_str='cuda')
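# Illustrative aside (not part of the generated module): taken together with the two mm calls
# in `call`, the fused kernels implement the Denoiser's decode-then-mask of its own input.
def _denoiser_reference(x, w_enc, b_enc, w_dec, b_dec):
    enc = torch.relu(x @ w_enc.t() + b_enc)    # fnn_enc + ReLU
    dec = torch.relu(enc @ w_dec.t() + b_dec)  # fnn_dec + ReLU
    return dec * x                             # multiplicative skip with the input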
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [fnn_enc_output], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 128, grid=grid(128), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [fnn_dec_output, v_j_filt], Original ATen: [aten.relu, aten.mul, aten.threshold_backward]
triton_poi_fused_mul_relu_threshold_backward_1.run(buf2, primals_5, primals_3, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del primals_5
return (buf3, primals_3, reinterpret_tensor(buf1, (64, 2), (2, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Linear
from torch.nn.init import xavier_normal_
from torch.nn.functional import relu
class ps_FNNDenoiser(Module):
def __init__(self, input_dim):
"""The FNN enc and FNN dec of the Denoiser.
:param input_dim: The input dimensionality.
:type input_dim: int
"""
super(ps_FNNDenoiser, self).__init__()
self._input_dim = input_dim
self.fnn_enc = Linear(self._input_dim, int(self._input_dim / 2))
self.fnn_dec = Linear(int(self._input_dim / 2), self._input_dim)
self.initialize_module()
def initialize_module(self):
"""Manual weight/bias initialization.
"""
xavier_normal_(self.fnn_enc.weight)
self.fnn_enc.bias.data.zero_()
xavier_normal_(self.fnn_dec.weight)
self.fnn_dec.bias.data.zero_()
def forward(self, v_j_filt_prime):
"""The forward pass.
:param v_j_filt_prime: The output of the Masker.
:type v_j_filt_prime: torch.autograd.variable.Variable
:return: The output of the Denoiser
:rtype: torch.autograd.variable.Variable
"""
fnn_enc_output = relu(self.fnn_enc(v_j_filt_prime))
fnn_dec_output = relu(self.fnn_dec(fnn_enc_output))
v_j_filt = fnn_dec_output.mul(v_j_filt_prime)
return v_j_filt
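# Illustrative usage (not part of the original source); the shapes follow get_inputs and
# get_init_inputs below.
def _example_usage():
    denoiser = ps_FNNDenoiser(input_dim=4)
    v_j_filt = denoiser(torch.rand(4, 4, 4, 4))
    return v_j_filt.shape  # torch.Size([4, 4, 4, 4]), same shape as the input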
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch.nn import Linear
from torch.nn.init import xavier_normal_
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
# Fused bias-add + ReLU on the encoder pre-activations (in place); also writes the
# ReLU backward mask (output <= 0).
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
# Fused bias-add + ReLU on the decoder pre-activations, multiplied elementwise by the
# original input; also writes the ReLU backward mask.
@triton.jit
def triton_poi_fused_mul_relu_threshold_backward_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(128)](buf1,
primals_2, buf5, 128, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 2), (2, 1), 0),
reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_mul_relu_threshold_backward_1[grid(256)](buf2,
primals_5, primals_3, buf3, buf4, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf2
del primals_5
return buf3, primals_3, reinterpret_tensor(buf1, (64, 2), (2, 1), 0
), buf4, primals_4, buf5
class ps_FNNDenoiserNew(Module):
def __init__(self, input_dim):
"""The FNN enc and FNN dec of the Denoiser.
:param input_dim: The input dimensionality.
:type input_dim: int
"""
super(ps_FNNDenoiserNew, self).__init__()
self._input_dim = input_dim
self.fnn_enc = Linear(self._input_dim, int(self._input_dim / 2))
self.fnn_dec = Linear(int(self._input_dim / 2), self._input_dim)
self.initialize_module()
def initialize_module(self):
"""Manual weight/bias initialization.
"""
xavier_normal_(self.fnn_enc.weight)
self.fnn_enc.bias.data.zero_()
xavier_normal_(self.fnn_dec.weight)
self.fnn_dec.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.fnn_enc.weight
primals_2 = self.fnn_enc.bias
primals_4 = self.fnn_dec.weight
primals_5 = self.fnn_dec.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ddcas/singing-language-identification | ps_FNNDenoiser | false | 1,817 | [
"MIT"
] | 0 | d104419b196d56d4de37cff47c32e88e28c58690 | https://github.com/ddcas/singing-language-identification/tree/d104419b196d56d4de37cff47c32e88e28c58690 | from torch.nn import Module
import torch
from torch.nn import Linear
from torch.nn.init import xavier_normal_
from torch.nn.functional import relu
class Model(Module):
def __init__(self, input_dim):
"""The FNN enc and FNN dec of the Denoiser.
:param input_dim: The input dimensionality.
:type input_dim: int
"""
super().__init__()
self._input_dim = input_dim
self.fnn_enc = Linear(self._input_dim, int(self._input_dim / 2))
self.fnn_dec = Linear(int(self._input_dim / 2), self._input_dim)
self.initialize_module()
def initialize_module(self):
"""Manual weight/bias initialization.
"""
xavier_normal_(self.fnn_enc.weight)
self.fnn_enc.bias.data.zero_()
xavier_normal_(self.fnn_dec.weight)
self.fnn_dec.bias.data.zero_()
def forward(self, v_j_filt_prime):
"""The forward pass.
:param v_j_filt_prime: The output of the Masker.
:type v_j_filt_prime: torch.autograd.variable.Variable
:return: The output of the Denoiser
:rtype: torch.autograd.variable.Variable
"""
fnn_enc_output = relu(self.fnn_enc(v_j_filt_prime))
fnn_dec_output = relu(self.fnn_dec(fnn_enc_output))
v_j_filt = fnn_dec_output.mul(v_j_filt_prime)
return v_j_filt
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
RegularizedLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/pz/cpzd3zunvtbunbsfzisaee7nto7o24q5vtgqdawe4lkvq26st6l7.py
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sum_1 => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 0.001), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 0.001
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/lm/clmqieg7cd4vtujqpqsmfaltmydcasg2mkeo5chrxxqhjlh2iuzw.py
# Topologically Sorted Source Nodes: [abs_1, sum_2, mul_2], Original ATen: [aten.abs, aten.sum, aten.mul]
# Source node to ATen node mapping:
# abs_1 => abs_1
# mul_2 => mul_2
# sum_2 => sum_2
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%abs_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 0.001), kwargs = {})
triton_per_fused_abs_mul_sum_1 = async_compile.triton('triton_per_fused_abs_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mul_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mul_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = 0.001
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(buf3, buf0, 1, 256, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [abs_1, sum_2, mul_2], Original ATen: [aten.abs, aten.sum, aten.mul]
triton_per_fused_abs_mul_sum_1.run(buf4, primals_1, 1, 16, grid=grid(1), stream=stream0)
return (reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf3, buf4, primals_1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class RegularizedLinear(nn.Linear):
def __init__(self, *args, ar_weight=0.001, l1_weight=0.001, **kwargs):
super(RegularizedLinear, self).__init__(*args, **kwargs)
self.ar_weight = ar_weight
self.l1_weight = l1_weight
self._losses = {}
def forward(self, input):
output = super(RegularizedLinear, self).forward(input)
self._losses['activity_regularization'] = (output * output).sum(
) * self.ar_weight
self._losses['l1_weight_regularization'] = torch.abs(self.weight).sum(
) * self.l1_weight
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
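# --- Editor's usage sketch (hypothetical; not part of the original repo code) ---
# RegularizedLinear stores its activity-regularization and L1 weight penalties
# in `self._losses` on every forward pass. A minimal sketch of how a training
# step might fold them into a task loss; the names `layer`, `x`, and `target`
# are illustrative assumptions, not part of the original module.
def _example_regularized_training_step():
    layer = RegularizedLinear(4, 4)
    x, target = torch.rand(8, 4), torch.rand(8, 4)
    pred = layer(x)
    # task loss plus the two stored regularization terms
    loss = nn.functional.mse_loss(pred, target) + sum(layer._losses.values())
    loss.backward()
    return loss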
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mul_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 0.001
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_per_fused_abs_mul_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = 0.001
tmp6 = tmp4 * tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(1)](buf3, buf0, 1, 256, num_warps=2,
num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
triton_per_fused_abs_mul_sum_1[grid(1)](buf4, primals_1, 1, 16,
XBLOCK=1, num_warps=2, num_stages=1)
return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf3, buf4, primals_1, reinterpret_tensor(primals_3, (64, 4), (4,
1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
class RegularizedLinearNew(nn.Linear):
def __init__(self, *args, ar_weight=0.001, l1_weight=0.001, **kwargs):
super(RegularizedLinearNew, self).__init__(*args, **kwargs)
self.ar_weight = ar_weight
self.l1_weight = l1_weight
self._losses = {}
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| dearkafka/inferno | RegularizedLinear | false | 1,818 | [
"Apache-2.0"
] | 0 | e9e3b863fd1fc97cf94d08ac6b4f8df7665f996a | https://github.com/dearkafka/inferno/tree/e9e3b863fd1fc97cf94d08ac6b4f8df7665f996a | import torch
import torch.nn as nn
class Model(nn.Linear):
def __init__(self, *args, ar_weight=0.001, l1_weight=0.001, **kwargs):
super().__init__(*args, **kwargs)
self.ar_weight = ar_weight
self.l1_weight = l1_weight
self._losses = {}
def forward(self, input):
output = super(RegularizedLinear, self).forward(input)
self._losses['activity_regularization'] = (output * output).sum(
) * self.ar_weight
self._losses['l1_weight_regularization'] = torch.abs(self.weight).sum(
) * self.l1_weight
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SorensenDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ka/cka2cen3ln45uzcldwvgt5ed6fjahmzninhd3llq632jf6gwu4e2.py
# Topologically Sorted Source Nodes: [mul, numerator, mul_1, sum_2, mul_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# numerator => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %view_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((16*x0) + (64*(r1 // 16)) + (r1 % 16)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + ((16*x0) + (64*(r1 // 16)) + (r1 % 16)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tmp1 * tmp1
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
tl.store(out_ptr2 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/i6/ci6n4y4o7rzhmejnnogeyaw6by5bwspd3okw2uue47vhp55cujbh.py
# Topologically Sorted Source Nodes: [denominator, clamp, truediv, channelwise_loss, loss], Original ATen: [aten.add, aten.clamp, aten.div, aten.mul, aten.sum]
# Source node to ATen node mapping:
# channelwise_loss => mul_3
# clamp => clamp_min
# denominator => add
# loss => sum_4
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %clamp_min), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, -2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_clamp_div_mul_sum_1 = async_compile.triton('triton_per_fused_add_clamp_div_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_div_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_div_mul_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tl.load(in_ptr2 + (r0), None)
tmp3 = tmp1 + tmp2
tmp4 = 1e-06
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp0 / tmp5
tmp7 = -2.0
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, numerator, mul_1, sum_2, mul_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [denominator, clamp, truediv, channelwise_loss, loss], Original ATen: [aten.add, aten.clamp, aten.div, aten.mul, aten.sum]
triton_per_fused_add_clamp_div_mul_sum_1.run(buf0, buf1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import Variable
def assert_(condition, message='', exception_type=AssertionError):
"""Like assert, but with arbitrary exception types."""
if not condition:
raise exception_type(message)
def flatten_samples(tensor_or_variable):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2D.
"""
assert_(tensor_or_variable.dim() >= 2,
        'Tensor or variable must be at least 2D. Got one of dim {}.'.format(
tensor_or_variable.dim()), ShapeError)
num_channels = tensor_or_variable.size(1)
permute_axes = list(range(tensor_or_variable.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = tensor_or_variable.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
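# --- Editor's note: illustrative shape check (not from the original repo) ---
# flatten_samples moves the channel axis to the front and flattens the rest,
# e.g. (N, C, H, W) = (2, 3, 4, 5) becomes (C, N * H * W) = (3, 40).
def _example_flatten_samples_shapes():
    assert flatten_samples(torch.zeros(2, 3, 4, 5)).shape == (3, 40)
    assert flatten_samples(torch.zeros(2, 3)).shape == (3, 2)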
class ShapeError(ValueError):
pass
class SorensenDiceLoss(nn.Module):
"""
Computes a loss scalar, which when minimized maximizes the Sorensen-Dice similarity
between the input and the target. For both inputs and targets it must be the case that
`input_or_target.size(1) = num_channels`.
"""
def __init__(self, weight=None, channelwise=True, eps=1e-06):
"""
Parameters
----------
weight : torch.FloatTensor or torch.cuda.FloatTensor
Class weights. Applies only if `channelwise = True`.
channelwise : bool
Whether to apply the loss channelwise and sum the results (True)
or to apply it on all channels jointly (False).
"""
super(SorensenDiceLoss, self).__init__()
self.register_buffer('weight', weight)
self.channelwise = channelwise
self.eps = eps
def forward(self, input, target):
if not self.channelwise:
numerator = (input * target).sum()
denominator = (input * input).sum() + (target * target).sum()
loss = -2.0 * (numerator / denominator.clamp(min=self.eps))
else:
input = flatten_samples(input)
target = flatten_samples(target)
numerator = (input * target).sum(-1)
denominator = (input * input).sum(-1) + (target * target).sum(-1)
channelwise_loss = -2 * (numerator / denominator.clamp(min=self
.eps))
if self.weight is not None:
if channelwise_loss.dim() == 2:
channelwise_loss = channelwise_loss.squeeze(1)
weight = Variable(self.weight, requires_grad=False)
channelwise_loss = weight * channelwise_loss
loss = channelwise_loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
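# --- Editor's sanity-check sketch for SorensenDiceLoss (hypothetical) ---
# Channelwise, the loss is -2 * <x_c, t_c> / (||x_c||^2 + ||t_c||^2) summed over
# channels, so a perfect match yields roughly -num_channels (-1 per channel).
def _example_sorensen_dice_perfect_match():
    x = torch.rand(2, 3, 8, 8)
    loss = SorensenDiceLoss()(x, x)
    assert torch.allclose(loss, torch.tensor(-3.0), atol=1e-4)
    return loss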
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp1 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask,
other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tmp1 * tmp1
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp16, xmask)
@triton.jit
def triton_per_fused_add_clamp_div_mul_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp3 = tmp1 + tmp2
tmp4 = 1e-06
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp0 / tmp5
tmp7 = -2.0
tmp8 = tmp6 * tmp7
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_div_mul_sum_1[grid(1)](buf0, buf1, buf2,
buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf3,
def assert_(condition, message='', exception_type=AssertionError):
"""Like assert, but with arbitrary exception types."""
if not condition:
raise exception_type(message)
def flatten_samples(tensor_or_variable):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2D.
"""
assert_(tensor_or_variable.dim() >= 2,
        'Tensor or variable must be at least 2D. Got one of dim {}.'.format(
tensor_or_variable.dim()), ShapeError)
num_channels = tensor_or_variable.size(1)
permute_axes = list(range(tensor_or_variable.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = tensor_or_variable.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
class ShapeError(ValueError):
pass
class SorensenDiceLossNew(nn.Module):
"""
Computes a loss scalar, which when minimized maximizes the Sorensen-Dice similarity
between the input and the target. For both inputs and targets it must be the case that
`input_or_target.size(1) = num_channels`.
"""
def __init__(self, weight=None, channelwise=True, eps=1e-06):
"""
Parameters
----------
weight : torch.FloatTensor or torch.cuda.FloatTensor
Class weights. Applies only if `channelwise = True`.
channelwise : bool
Whether to apply the loss channelwise and sum the results (True)
or to apply it on all channels jointly (False).
"""
super(SorensenDiceLossNew, self).__init__()
self.register_buffer('weight', weight)
self.channelwise = channelwise
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dearkafka/inferno | SorensenDiceLoss | false | 1,819 | [
"Apache-2.0"
] | 0 | e9e3b863fd1fc97cf94d08ac6b4f8df7665f996a | https://github.com/dearkafka/inferno/tree/e9e3b863fd1fc97cf94d08ac6b4f8df7665f996a | import torch
import torch.nn as nn
from torch.autograd import Variable
def assert_(condition, message='', exception_type=AssertionError):
"""Like assert, but with arbitrary exception types."""
if not condition:
raise exception_type(message)
def flatten_samples(tensor_or_variable):
"""
Flattens a tensor or a variable such that the channel axis is first and the sample axis
is second. The shapes are transformed as follows:
(N, C, H, W) --> (C, N * H * W)
(N, C, D, H, W) --> (C, N * D * H * W)
(N, C) --> (C, N)
    The input must be at least 2D.
"""
assert_(tensor_or_variable.dim() >= 2,
        'Tensor or variable must be at least 2D. Got one of dim {}.'.format(
tensor_or_variable.dim()), ShapeError)
num_channels = tensor_or_variable.size(1)
permute_axes = list(range(tensor_or_variable.dim()))
permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
permuted = tensor_or_variable.permute(*permute_axes).contiguous()
flattened = permuted.view(num_channels, -1)
return flattened
class ShapeError(ValueError):
pass
class Model(nn.Module):
"""
Computes a loss scalar, which when minimized maximizes the Sorensen-Dice similarity
between the input and the target. For both inputs and targets it must be the case that
`input_or_target.size(1) = num_channels`.
"""
def __init__(self, weight=None, channelwise=True, eps=1e-06):
"""
Parameters
----------
weight : torch.FloatTensor or torch.cuda.FloatTensor
Class weights. Applies only if `channelwise = True`.
channelwise : bool
Whether to apply the loss channelwise and sum the results (True)
or to apply it on all channels jointly (False).
"""
super().__init__()
self.register_buffer('weight', weight)
self.channelwise = channelwise
self.eps = eps
def forward(self, input, target):
if not self.channelwise:
numerator = (input * target).sum()
denominator = (input * input).sum() + (target * target).sum()
loss = -2.0 * (numerator / denominator.clamp(min=self.eps))
else:
input = flatten_samples(input)
target = flatten_samples(target)
numerator = (input * target).sum(-1)
denominator = (input * input).sum(-1) + (target * target).sum(-1)
channelwise_loss = -2 * (numerator / denominator.clamp(min=self
.eps))
if self.weight is not None:
if channelwise_loss.dim() == 2:
channelwise_loss = channelwise_loss.squeeze(1)
weight = Variable(self.weight, requires_grad=False)
channelwise_loss = weight * channelwise_loss
loss = channelwise_loss.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MTFullyConnected | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/5y/c5ycegys2ar7wz6epy2shzbmz2mkdkn4qmpsdbgrzhhu5ufhemkx.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 8000
x1 = (xindex // 8000)
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x0 + (8064*x1)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2d/c2dldo522mnd74n46v2lrkgeutrjrvjind75q4sshb25ha2k2cdg.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = (xindex // 4000)
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x0 + (4096*x1)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4o/c4o2m7ozfavldjjmvy54d5j4mjtrlcvp7qonixpmh5h2yro5ycts.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = (xindex // 2000)
tmp0 = tl.load(in_out_ptr0 + (x0 + (2016*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + (2016*x1)), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (2048*x1)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xp/cxpqywcqam7evubfwwa5zmt733w2zov6otomgqgpramgjdsnjg5k.py
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# y_3 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_7,), kwargs = {})
triton_poi_fused_sigmoid_3 = async_compile.triton('triton_poi_fused_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8000, 4), (4, 1))
assert_size_stride(primals_2, (8000, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4000, 8000), (8000, 1))
assert_size_stride(primals_5, (4000, ), (1, ))
assert_size_stride(primals_6, (2000, 4000), (4000, 1))
assert_size_stride(primals_7, (2000, ), (1, ))
assert_size_stride(primals_8, (4, 2000), (2000, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1), torch.bool)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 512000, grid=grid(512000), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0); del buf2 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf9, 256000, grid=grid(256000), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0); del buf4 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf8, 128000, grid=grid(128000), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_3.run(buf7, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8000, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4000, 8000), (8000, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2000, 4000), (4000, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 2000), (2000, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating model and predcting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
        In the end, the optimal parameter values will also be persisted to disk.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is as same as loader_train.
out (str): the file path for the model file (suffix with '.pkg')
and log file (suffix with '.log').
epochs(int, optional): The maximum of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
log = open(out + '.log', 'w')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self.forward(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss = self.criterion(y_, yb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
Return:
            loss (float): the average loss value computed by the loss function on the given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss += self.criterion(y_, yb).data[0]
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
                (m is the No. of samples, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
                it is an m X l FloatTensor (m is the No. of samples, l is the No. of classes or tasks).
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class MTFullyConnected(Base):
"""Multi-task DNN classification/regression model. It contains four fully connected layers
    between which are dropout layers for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_task (int): the No. of columns (tasks) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_task, is_reg=False):
super(MTFullyConnected, self).__init__()
self.n_task = n_task
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 8000)
self.fc1 = nn.Linear(8000, 4000)
self.fc2 = nn.Linear(4000, 2000)
self.output = nn.Linear(2000, n_task)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
else:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
self
def forward(self, X, istrain=False):
"""Invoke the class directly as a function
Arguments:
X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features.
istrain (bool, optional): is it invoked during training process (True)
or just for prediction (False)
Return:
            y (FloatTensor): m X l FloatTensor, m is the No. of samples, l is the No. of tasks
"""
y = F.relu(self.fc0(X))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc1(y))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc2(y))
if istrain:
y = self.dropout(y)
if self.is_reg:
y = self.output(y)
else:
y = self.activation(self.output(y))
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_dim': 4, 'n_task': 4}]
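# --- Editor's usage sketch (hypothetical; not from the original repo) ---
# Builds the classification variant and runs one forward pass; with
# is_reg=False the final sigmoid keeps every task column inside [0, 1].
def _example_mt_fully_connected_forward():
    model = MTFullyConnected(n_dim=4, n_task=4, is_reg=False)
    X = torch.rand(8, 4)
    y = model(X, istrain=False)
    assert y.shape == (8, 4)
    assert float(y.min()) >= 0.0 and float(y.max()) <= 1.0
    return y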
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import time
import numpy as np
from torch import nn
from torch import optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 8000
x1 = xindex // 8000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 8064 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused_sigmoid_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8000, 4), (4, 1))
assert_size_stride(primals_2, (8000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4000, 8000), (8000, 1))
assert_size_stride(primals_5, (4000,), (1,))
assert_size_stride(primals_6, (2000, 4000), (4000, 1))
assert_size_stride(primals_7, (2000,), (1,))
assert_size_stride(primals_8, (4, 2000), (2000, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000,
8000, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1
), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512000)](buf1,
primals_2, buf10, 512000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0
), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000,
4000, 1), 0)
del buf2
buf9 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3,
primals_5, buf9, 256000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0
), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0),
out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf4
buf8 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128000)](buf5,
primals_7, buf8, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0
), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), out=buf6
)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_sigmoid_3[grid(256)](buf7, primals_9, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0
), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0
), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0
), buf7, primals_8, buf8, primals_6, buf9, primals_4, buf10
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating model and predcting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
At the end of training, the optimal parameter values are also persisted to disk.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is the same as train_loader.
out (str): the file path for the model file (suffixed with '.pkg')
and log file (suffixed with '.log').
epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
log = open(out + '.log', 'w')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self.forward(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss = self.criterion(y_, yb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
Return:
loss (float): the average loss value based on the calculation of loss function with given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss += self.criterion(y_, yb).item()  # .item() replaces the deprecated .data[0] for extracting the scalar loss
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
(m is the No. of sample, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.)
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
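# --- Editor's illustrative sketch (not part of the original repository code) ---
# The fit/evaluate/predict docstrings above describe a scikit-learn-style workflow;
# this minimal driver shows the intended call order. The DataLoader construction,
# batch size and output prefix below are assumptions for illustration only.
def example_base_workflow(model, X_train, y_train, X_valid, y_valid, out='model'):
    from torch.utils.data import DataLoader, TensorDataset
    train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=128, shuffle=True)
    valid_loader = DataLoader(TensorDataset(X_valid, y_valid), batch_size=128)
    model.fit(train_loader, valid_loader, out=out, epochs=100, lr=0.0001)  # writes out + '.pkg' / '.log'
    return model.predict(valid_loader)  # ndarray of per-sample scores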
class MTFullyConnectedNew(Base):
"""Multi-task DNN classification/regression model. It contains four fully connected layers
between which are dropout layers for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_task (int): the No. of columns (tasks) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_task, is_reg=False):
super(MTFullyConnectedNew, self).__init__()
self.n_task = n_task
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 8000)
self.fc1 = nn.Linear(8000, 4000)
self.fc2 = nn.Linear(4000, 2000)
self.output = nn.Linear(2000, n_task)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
else:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
self
def forward(self, input_0):
primals_1 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_8 = self.output.weight
primals_9 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
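# --- Editor's illustrative sketch (not repo code) ---
# Driving the compiled multi-task wrapper above. A CUDA device is assumed because
# call() pins every buffer to cuda:0, and the (4, 4, 4, 4) input mirrors the shape
# asserted for primals_3 in call() above; both are assumptions for illustration.
def example_mtfullyconnected_usage():
    model = MTFullyConnectedNew(n_dim=4, n_task=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')  # trailing dim must equal n_dim
    y = model(x)  # sigmoid scores with the same leading dims and last dim == n_task
    return y.shape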
| cthoyt/DrugEx | MTFullyConnected | false | 1,820 | ["MIT"] | 0 | 9e4d31adb2c65d0afc852948f502c79dcf8308a3 | https://github.com/cthoyt/DrugEx/tree/9e4d31adb2c65d0afc852948f502c79dcf8308a3 | import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating model and predcting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
At the end of training, the optimal parameter values are also persisted to disk.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is the same as train_loader.
out (str): the file path for the model file (suffixed with '.pkg')
and log file (suffixed with '.log').
epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
log = open(out + '.log', 'w')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self.forward(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss = self.criterion(y_, yb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
Return:
loss (float): the average loss value based on the calculation of loss function with given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss += self.criterion(y_, yb).item()  # .item() replaces the deprecated .data[0] for extracting the scalar loss
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
(m is the No. of sample, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
it is a m X l FloatTensor (m is the No. of sample, l is the No. of classes or tasks.)
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
# ... truncated (>4000 chars) for memory efficiency |
Maxout | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/54/c54fiiozms64dqfszq2hf52cdztx43kas6yivnlda7p3bxzbtzle.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Maxout(nn.Module):
def __init__(self, pool_size):
super().__init__()
self._pool_size = pool_size
def forward(self, x):
assert x.shape[-1] % self._pool_size == 0, \
    'Wrong input last dim size ({}) for Maxout({})'.format(x.shape[-1], self._pool_size)
m, _i = x.view(*x.shape[:-1], x.shape[-1] // self._pool_size,
    self._pool_size).max(-1)
return m
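# --- Editor's note: minimal Maxout usage sketch (not part of the original file) ---
# With pool_size=4, an input of shape (..., 8) is viewed as (..., 2, 4) and the max is
# taken over the last axis, giving shape (..., 2). The sizes below are illustrative.
def example_maxout():
    layer = Maxout(pool_size=4)
    x = torch.rand(3, 8)  # last dim must be divisible by pool_size
    y = layer(x)  # shape (3, 2): max over each group of 4 consecutive features
    assert y.shape == (3, 2)
    return y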
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'pool_size': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MaxoutNew(nn.Module):
def __init__(self, pool_size):
super().__init__()
self._pool_size = pool_size
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| demdecuong/SEGMENT | Maxout | false | 1,821 | ["MIT"] | 0 | 629dc55dcbc9629b35fb237e313b95ceacecdc89 | https://github.com/demdecuong/SEGMENT/tree/629dc55dcbc9629b35fb237e313b95ceacecdc89 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, pool_size):
super().__init__()
self._pool_size = pool_size
def forward(self, x):
assert x.shape[-1] % self._pool_size == 0, \
    'Wrong input last dim size ({}) for Maxout({})'.format(x.shape[-1], self._pool_size)
m, _i = x.view(*x.shape[:-1], x.shape[-1] // self._pool_size,
    self._pool_size).max(-1)
return m
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Debayer2x2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/4t/c4tuf2l6x2zj65cq45cofmz2gojaupedfzjwk73xw4vxbeoyxrqg.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
# Source node to ATen node mapping:
# x_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_4, add_5, add_6, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub_1, sub_2, sub_3, sub_4, sub_5, sub_6
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_2, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%convolution, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x2 = (xindex // 4096)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + (32*tmp12) + (1024*x2)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + (32*tmp12) + (1024*x2)), None, eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + (32*tmp8) + (1024*x2)), None, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + (32*tmp8) + (1024*x2)), None, eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + (x4), tmp43, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 32, 32), (3072, 1024, 32, 1))
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
buf2 = buf1; del buf1 # reuse
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._to_copy, aten.arange, aten.add, aten.mul, aten.sub, aten.clamp, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf3, buf0, 49152, grid=grid(49152), stream=stream0)
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((3, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Debayer2x2(nn.Module):
"""Demosaicing of Bayer images using 2x2 convolutions.
Requires BG-Bayer color filter array layout. That is,
the image[1,1]='B', image[1,2]='G'.
"""
def __init__(self):
super(Debayer2x2, self).__init__()
self.kernels = nn.Parameter(torch.tensor([[1, 0], [0, 0], [0, 0.5],
[0.5, 0], [0, 0], [0, 1]]).view(3, 1, 2, 2), requires_grad=False)
def forward(self, x):
"""Debayer image.
Parameters
----------
x : Bx1xHxW tensor
Images to debayer
Returns
-------
rgb : Bx3xHxW tensor
Color images in RGB channel order.
"""
x = F.conv2d(x, self.kernels, stride=2)
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners
=False)
return x
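# --- Editor's illustrative sketch (not repo code) ---
# End-to-end shape check for the module above: a Bx1xHxW BG-Bayer mosaic maps to a
# Bx3xHxW RGB image (stride-2 2x2 conv to half resolution, bilinear upsample back).
# The 64x64 size is an assumption taken from get_inputs() below.
def example_debayer():
    debayer = Debayer2x2()
    bayer = torch.rand(1, 1, 64, 64)  # single-channel Bayer mosaic
    rgb = debayer(bayer)
    assert rgb.shape == (1, 3, 64, 64)
    return rgb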
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x2 = xindex // 4096
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tmp13 = x0
tmp14 = tmp13.to(tl.float32)
tmp15 = tmp14 + tmp2
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp2
tmp18 = triton_helpers.maximum(tmp17, tmp6)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp9
tmp21 = triton_helpers.minimum(tmp20, tmp11)
tmp22 = tl.load(in_ptr0 + (tmp21 + 32 * tmp12 + 1024 * x2), None,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (tmp19 + 32 * tmp12 + 1024 * x2), None,
eviction_policy='evict_last')
tmp24 = tmp22 - tmp23
tmp25 = tmp19.to(tl.float32)
tmp26 = tmp18 - tmp25
tmp27 = triton_helpers.maximum(tmp26, tmp6)
tmp28 = 1.0
tmp29 = triton_helpers.minimum(tmp27, tmp28)
tmp30 = tmp24 * tmp29
tmp31 = tmp23 + tmp30
tmp32 = tl.load(in_ptr0 + (tmp19 + 32 * tmp8 + 1024 * x2), None,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (tmp21 + 32 * tmp8 + 1024 * x2), None,
eviction_policy='evict_last')
tmp34 = tmp33 - tmp32
tmp35 = tmp34 * tmp29
tmp36 = tmp32 + tmp35
tmp37 = tmp31 - tmp36
tmp38 = tmp8.to(tl.float32)
tmp39 = tmp7 - tmp38
tmp40 = triton_helpers.maximum(tmp39, tmp6)
tmp41 = triton_helpers.minimum(tmp40, tmp28)
tmp42 = tmp37 * tmp41
tmp43 = tmp36 + tmp42
tl.store(in_out_ptr0 + x4, tmp43, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(arg1_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(arg1_1, arg0_1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 32, 32), (3072, 1024, 32, 1))
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
buf2 = buf1
del buf1
buf3 = buf2
del buf2
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(49152)](buf3, buf0, 49152, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
return buf3,
class Debayer2x2New(nn.Module):
"""Demosaicing of Bayer images using 2x2 convolutions.
Requires BG-Bayer color filter array layout. That is,
the image[1,1]='B', image[1,2]='G'.
"""
def __init__(self):
super(Debayer2x2New, self).__init__()
self.kernels = nn.Parameter(torch.tensor([[1, 0], [0, 0], [0, 0.5],
[0.5, 0], [0, 0], [0, 1]]).view(3, 1, 2, 2), requires_grad=False)
def forward(self, input_0):
arg0_1 = self.kernels
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
| delldu/ImageClean | Debayer2x2 | false | 1,822 | ["MIT"] | 0 | ffa5b180d36afb3840c6b36c08a767c520068498 | https://github.com/delldu/ImageClean/tree/ffa5b180d36afb3840c6b36c08a767c520068498 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Demosaicing of Bayer images using 2x2 convolutions.
Requires BG-Bayer color filter array layout. That is,
the image[1,1]='B', image[1,2]='G'.
"""
def __init__(self):
super().__init__()
self.kernels = nn.Parameter(torch.tensor([[1, 0], [0, 0], [0, 0.5],
[0.5, 0], [0, 0], [0, 1]]).view(3, 1, 2, 2), requires_grad=False)
def forward(self, x):
"""Debayer image.
Parameters
----------
x : Bx1xHxW tensor
Images to debayer
Returns
-------
rgb : Bx3xHxW tensor
Color images in RGB channel order.
"""
x = F.conv2d(x, self.kernels, stride=2)
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners
=False)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
MuSigmaEncoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/hh/chhqj73wauyyzrvkvg6ib7lh5drbg7bq7ddygbqpehxytxvjdtkd.py
# Topologically Sorted Source Nodes: [sigmoid, mul, sigma], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# sigma => add
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, 0.9), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0.1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.9
tmp3 = tmp1 * tmp2
tmp4 = 0.1
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, sigma], Original ATen: [aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf3, primals_6, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Tuple
from torch import nn
class MuSigmaEncoder(nn.Module):
"""
Maps a representation r to mu and sigma which will define the normal
distribution from which we sample the latent variable z.
Parameters
----------
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
"""
def __init__(self, r_dim: 'int', z_dim: 'int') ->None:
super(MuSigmaEncoder, self).__init__()
self.r_dim = r_dim
self.z_dim = z_dim
self.r_to_hidden = nn.Linear(r_dim, r_dim)
self.hidden_to_mu = nn.Linear(r_dim, z_dim)
self.hidden_to_sigma = nn.Linear(r_dim, z_dim)
def forward(self, r: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
"""
r : torch.Tensor
Shape (batch_size, r_dim)
"""
hidden = torch.relu(self.r_to_hidden(r))
mu = self.hidden_to_mu(hidden)
sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden))
return mu, sigma
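# --- Editor's illustrative sketch (not repo code) ---
# The rescaled sigmoid above bounds sigma to (0.1, 1.0), keeping the Normal(mu, sigma)
# latent distribution well conditioned. The batch size of 8 is an assumed example.
def example_mu_sigma_encoder():
    enc = MuSigmaEncoder(r_dim=4, z_dim=4)
    r = torch.rand(8, 4)  # batch of aggregated representations
    mu, sigma = enc(r)
    z = torch.distributions.Normal(mu, sigma).rsample()  # reparameterized latent sample
    return z.shape  # torch.Size([8, 4])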
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'r_dim': 4, 'z_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 0.9
tmp3 = tmp1 * tmp2
tmp4 = 0.1
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf3, buf4, 256,
XBLOCK=128, num_warps=4, num_stages=1)
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf3, primals_6, primals_4, buf5
class MuSigmaEncoderNew(nn.Module):
"""
Maps a representation r to mu and sigma which will define the normal
distribution from which we sample the latent variable z.
Parameters
----------
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
"""
def __init__(self, r_dim: 'int', z_dim: 'int') ->None:
super(MuSigmaEncoderNew, self).__init__()
self.r_dim = r_dim
self.z_dim = z_dim
self.r_to_hidden = nn.Linear(r_dim, r_dim)
self.hidden_to_mu = nn.Linear(r_dim, z_dim)
self.hidden_to_sigma = nn.Linear(r_dim, z_dim)
def forward(self, input_0):
primals_1 = self.r_to_hidden.weight
primals_2 = self.r_to_hidden.bias
primals_4 = self.hidden_to_mu.weight
primals_5 = self.hidden_to_mu.bias
primals_6 = self.hidden_to_sigma.weight
primals_7 = self.hidden_to_sigma.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| deltaskelta/neural-processes | MuSigmaEncoder | false | 1,823 | ["MIT"] | 0 | 34a6b98b7a9142f5e5f87f7f1644217d5aa9e1bb | https://github.com/deltaskelta/neural-processes/tree/34a6b98b7a9142f5e5f87f7f1644217d5aa9e1bb | import torch
from typing import Tuple
from torch import nn
class Model(nn.Module):
"""
Maps a representation r to mu and sigma which will define the normal
distribution from which we sample the latent variable z.
Parameters
----------
r_dim : int
Dimension of output representation r.
z_dim : int
Dimension of latent variable z.
"""
def __init__(self, r_dim: 'int', z_dim: 'int') ->None:
super().__init__()
self.r_dim = r_dim
self.z_dim = z_dim
self.r_to_hidden = nn.Linear(r_dim, r_dim)
self.hidden_to_mu = nn.Linear(r_dim, z_dim)
self.hidden_to_sigma = nn.Linear(r_dim, z_dim)
def forward(self, r: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor]:
"""
r : torch.Tensor
Shape (batch_size, r_dim)
"""
hidden = torch.relu(self.r_to_hidden(r))
mu = self.hidden_to_mu(hidden)
sigma = 0.1 + 0.9 * torch.sigmoid(self.hidden_to_sigma(hidden))
return mu, sigma
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
fixed_loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/jb/cjbprg2adil5ynwgs27eksae22pr7nqq74cbmg7xyyfifovo2s4i.py
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# h_tv => sum_1
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_3, %slice_7), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_1,), kwargs = {})
triton_per_fused_pow_sub_sum_0 = async_compile.triton('triton_per_fused_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 12
r1 = (rindex // 12)
tmp0 = tl.load(in_ptr0 + (4 + r0 + (16*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (16*r1)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/z3/cz3b4qhx5z7q2mtytwrrr7736hknw6lpeqtgoccvjmlfybga7vh2.py
# Topologically Sorted Source Nodes: [sub_1, pow_2, w_tv], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_2 => pow_2
# sub_1 => sub_1
# w_tv => sum_2
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_12, %slice_16), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_2,), kwargs = {})
triton_per_fused_pow_sub_sum_1 = async_compile.triton('triton_per_fused_pow_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_pow_sub_sum_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex % 3
r1 = (rindex // 3)
tmp0 = tl.load(in_ptr0 + (1 + r0 + (4*r1)), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + (4*r1)), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
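# --- Editor's reference sketch in plain PyTorch (not part of the generated code) ---
# The two kernels above accumulate sum((x[:, :, 1:, :] - x[:, :, :-1, :]) ** 2) and
# sum((x[:, :, :, 1:] - x[:, :, :, :-1]) ** 2) for the (4, 4, 4, 4) input; each sum is
# later divided by 48, matching the %div nodes in the graph fragment further below.
def reference_tv_terms(x):
    h_tv = (x[:, :, 1:, :] - x[:, :, :-1, :]).pow(2).sum() / 48
    w_tv = (x[:, :, :, 1:] - x[:, :, :, :-1]).pow(2).sum() / 48
    return h_tv + w_tv  # the 'tvloss' term that is scaled by 0.05 in the fused kernel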
# kernel path: runs/run_shard_6/inductor_cache/ad/cadcluppyfzj6ua3ag6vqkfwx3prkvvszqssvuzsu2tqrhus4oie.py
# Topologically Sorted Source Nodes: [sub_2, pow_3, mean, mul, sub_3, relu, sub_4, abs_1, sub_5, pow_4, mul_1, mean_1, mul_2, add_1, truediv, truediv_1, tvloss, mul_3, loss], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul, aten.relu, aten.rsub, aten.abs, aten.add, aten.div]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add_1 => add_1
# loss => add_2
# mean => mean
# mean_1 => mean_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_3 => pow_3
# pow_4 => pow_4
# relu => relu
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# truediv => div
# truediv_1 => div_1
# tvloss => add
# Graph fragment:
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg3_1, 0.5), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg4_1, %arg0_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_3,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.3, %relu), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_4,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg4_1), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %pow_4), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %mean_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %mul_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 48), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 48), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %div_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.05), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_3), kwargs = {})
triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2 = async_compile.triton('triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 7, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr2, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp7 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp23 = tl.load(in_ptr4 + (r0), None)
tmp29 = tl.load(in_ptr5 + (0))
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp33 = tl.load(in_ptr6 + (0))
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = tmp7 - tmp8
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = 0.3
tmp13 = tmp12 - tmp11
tmp14 = tl_math.abs(tmp13)
tmp15 = tmp8 - tmp7
tmp16 = tmp15 * tmp15
tmp17 = tmp14 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = 256.0
tmp22 = tmp6 / tmp21
tmp24 = 0.5
tmp25 = tmp23 * tmp24
tmp26 = tmp20 / tmp21
tmp27 = tmp25 * tmp26
tmp28 = tmp22 + tmp27
tmp31 = 0.020833333333333332
tmp32 = tmp30 * tmp31
tmp35 = tmp34 * tmp31
tmp36 = tmp32 + tmp35
tmp37 = 0.05
tmp38 = tmp36 * tmp37
tmp39 = tmp28 + tmp38
tl.store(out_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp39, None)
''', device_str='cuda')
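# Editor's note (hedged): this persistent-reduction kernel fuses the remaining terms of
# fixed_loss.forward: mean((out - gt) ** 2) plus the asymmetric penalty
# if_asym * 0.5 * mean(|0.3 - relu(gt_noise - est_noise)| * (est_noise - gt_noise) ** 2)
# plus 0.05 * (h_tv / 48 + w_tv / 48), where 48 = C * (H - 1) * W for the (4, 4, 4, 4)
# inputs (hence the literal 0.020833333333333332 = 1 / 48) and 256.0 is the element count
# behind the means. The reference below mirrors the eager computation for comparison only;
# it is not emitted by Inductor.
def _reference_fixed_loss(out_image, gt_image, est_noise, gt_noise, if_asym):
    h_tv = ((est_noise[:, :, 1:, :] - est_noise[:, :, :-1, :]) ** 2).sum()
    w_tv = ((est_noise[:, :, :, 1:] - est_noise[:, :, :, :-1]) ** 2).sum()
    tvloss = h_tv / 48.0 + w_tv / 48.0
    asym = (0.3 - torch.relu(gt_noise - est_noise)).abs() * (est_noise - gt_noise) ** 2
    return ((out_image - gt_image) ** 2).mean() + if_asym * 0.5 * asym.mean() + 0.05 * tvloss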
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, h_tv], Original ATen: [aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_pow_sub_sum_0.run(arg0_1, buf2, 1, 192, grid=grid(1), stream=stream0)
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub_1, pow_2, w_tv], Original ATen: [aten.sub, aten.pow, aten.sum]
triton_per_fused_pow_sub_sum_1.run(arg0_1, buf3, 1, 192, grid=grid(1), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub_2, pow_3, mean, mul, sub_3, relu, sub_4, abs_1, sub_5, pow_4, mul_1, mean_1, mul_2, add_1, truediv, truediv_1, tvloss, mul_3, loss], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul, aten.relu, aten.rsub, aten.abs, aten.add, aten.div]
triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2.run(arg1_1, arg2_1, arg4_1, arg0_1, arg3_1, buf2, buf3, buf4, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del buf2
del buf3
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class fixed_loss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, out_image, gt_image, est_noise, gt_noise, if_asym):
h_x = est_noise.size()[2]
w_x = est_noise.size()[3]
count_h = self._tensor_size(est_noise[:, :, 1:, :])
count_w = self._tensor_size(est_noise[:, :, :, 1:])
h_tv = torch.pow(est_noise[:, :, 1:, :] - est_noise[:, :, :h_x - 1,
:], 2).sum()
w_tv = torch.pow(est_noise[:, :, :, 1:] - est_noise[:, :, :, :w_x -
1], 2).sum()
tvloss = h_tv / count_h + w_tv / count_w
loss = torch.mean(torch.pow(out_image - gt_image, 2)
) + if_asym * 0.5 * torch.mean(torch.mul(torch.abs(0.3 - F.relu
(gt_noise - est_noise)), torch.pow(est_noise - gt_noise, 2))
) + 0.05 * tvloss
return loss
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
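# Editor's note (hedged sketch): minimal eager usage of the loss module above; tensor
# shapes follow get_inputs() below and the helper name is illustrative only.
def _example_fixed_loss_usage():
    crit = fixed_loss()
    out, gt, est, gt_n, asym = (torch.rand(4, 4, 4, 4) for _ in range(5))
    # if_asym enters element-wise, so the returned loss has shape (4, 4, 4, 4)
    return crit(out, gt, est, gt_n, asym)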
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_pow_sub_sum_0(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 12
r1 = rindex // 12
tmp0 = tl.load(in_ptr0 + (4 + r0 + 16 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 16 * r1), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
@triton.jit
def triton_per_fused_pow_sub_sum_1(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex % 3
r1 = rindex // 3
tmp0 = tl.load(in_ptr0 + (1 + r0 + 4 * r1), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0 + 4 * r1), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp7, None)
@triton.jit
def triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr2, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp23 = tl.load(in_ptr4 + r0, None)
tmp29 = tl.load(in_ptr5 + 0)
tmp30 = tl.broadcast_to(tmp29, [RBLOCK])
tmp33 = tl.load(in_ptr6 + 0)
tmp34 = tl.broadcast_to(tmp33, [RBLOCK])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = tmp7 - tmp8
tmp10 = tl.full([1], 0, tl.int32)
tmp11 = triton_helpers.maximum(tmp10, tmp9)
tmp12 = 0.3
tmp13 = tmp12 - tmp11
tmp14 = tl_math.abs(tmp13)
tmp15 = tmp8 - tmp7
tmp16 = tmp15 * tmp15
tmp17 = tmp14 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = 256.0
tmp22 = tmp6 / tmp21
tmp24 = 0.5
tmp25 = tmp23 * tmp24
tmp26 = tmp20 / tmp21
tmp27 = tmp25 * tmp26
tmp28 = tmp22 + tmp27
tmp31 = 0.020833333333333332
tmp32 = tmp30 * tmp31
tmp35 = tmp34 * tmp31
tmp36 = tmp32 + tmp35
tmp37 = 0.05
tmp38 = tmp36 * tmp37
tmp39 = tmp28 + tmp38
tl.store(out_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp39, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_pow_sub_sum_0[grid(1)](arg0_1, buf2, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_pow_sub_sum_1[grid(1)](arg0_1, buf3, 1, 192,
XBLOCK=1, num_warps=2, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_abs_add_div_mean_mul_pow_relu_rsub_sub_2[grid(1)](
arg1_1, arg2_1, arg4_1, arg0_1, arg3_1, buf2, buf3, buf4, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del buf2
del buf3
return buf4,
class fixed_lossNew(nn.Module):
def __init__(self):
super().__init__()
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def forward(self, input_0, input_1, input_2, input_3, input_4):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
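# Editor's note (hedged): fixed_lossNew routes forward() through the compiled call() above,
# so all five inputs must be contiguous (4, 4, 4, 4) float32 CUDA tensors (see the
# assert_size_stride checks). Minimal sketch with illustrative names:
def _example_compiled_fixed_loss():
    crit = fixed_lossNew()
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(5)]
    return crit(*args)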
| delldu/ImageClean | fixed_loss | false | 1,824 | [
"MIT"
] | 0 | ffa5b180d36afb3840c6b36c08a767c520068498 | https://github.com/delldu/ImageClean/tree/ffa5b180d36afb3840c6b36c08a767c520068498 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, out_image, gt_image, est_noise, gt_noise, if_asym):
h_x = est_noise.size()[2]
w_x = est_noise.size()[3]
count_h = self._tensor_size(est_noise[:, :, 1:, :])
count_w = self._tensor_size(est_noise[:, :, :, 1:])
h_tv = torch.pow(est_noise[:, :, 1:, :] - est_noise[:, :, :h_x - 1,
:], 2).sum()
w_tv = torch.pow(est_noise[:, :, :, 1:] - est_noise[:, :, :, :w_x -
1], 2).sum()
tvloss = h_tv / count_h + w_tv / count_w
loss = torch.mean(torch.pow(out_image - gt_image, 2)
) + if_asym * 0.5 * torch.mean(torch.mul(torch.abs(0.3 - F.relu
(gt_noise - est_noise)), torch.pow(est_noise - gt_noise, 2))
) + 0.05 * tvloss
return loss
def _tensor_size(self, t):
return t.size()[1] * t.size()[2] * t.size()[3]
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
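# Editor's note (hedged): quick smoke test wiring get_inputs()/get_init_inputs() into the
# Model class above; illustrative only and not part of the original repository code.
def _smoke_test():
    model = Model(*get_init_inputs())
    return model(*get_inputs())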
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ez/cezmv74yrhrunjwqrletcmzzbnanma4ylsle3v7w345t7kxp622s.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_2, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, buf2, 64, 4, grid=grid(64, 4), stream=stream0)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), out=buf3)
del buf2
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.utils.data
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
class Discriminator(nn.Module):
def __init__(self, hidden_dim):
super(Discriminator, self).__init__()
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
self.reset_parameters()
def reset_parameters(self):
size = self.weight.size(0)
uniform(size, self.weight)
def forward(self, x, summary):
x = torch.matmul(x, torch.matmul(self.weight, summary))
return x
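# Editor's note (hedged): the discriminator scores x against a summary via the bilinear
# form x @ (W @ summary); with hidden_dim=4 and (4, 4, 4, 4) inputs the output keeps the
# input shape. Illustrative sketch only:
def _example_discriminator():
    disc = Discriminator(hidden_dim=4)
    x = torch.rand(4, 4, 4, 4)
    summary = torch.rand(4, 4, 4, 4)
    return disc(x, summary)  # shape (4, 4, 4, 4)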
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
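# Editor's note (hedged): triton_poi_fused_clone_0 materialises a contiguous copy of the
# permuted operand used by the matmuls, swapping the last two 4-element axes of a
# (4, 4, 4, 4) tensor. Eager reference for inspection only:
def _reference_clone(x):
    return x.transpose(-1, -2).contiguous()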
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_2, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_0[grid(64, 4)](buf1, buf2, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(primals_3, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0),
out=buf3)
del buf2
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(
primals_3, (16, 4, 4), (16, 1, 4), 0)
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
class DiscriminatorNew(nn.Module):
def __init__(self, hidden_dim):
super(DiscriminatorNew, self).__init__()
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
self.reset_parameters()
def reset_parameters(self):
size = self.weight.size(0)
uniform(size, self.weight)
def forward(self, input_0, input_1):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
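# Editor's note (hedged): DiscriminatorNew delegates to the compiled call(), so the weight
# and both inputs must live on CUDA with the asserted shapes. Minimal sketch, names are
# illustrative only:
def _example_compiled_discriminator():
    disc = DiscriminatorNew(hidden_dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    summary = torch.rand(4, 4, 4, 4, device='cuda')
    return disc(x, summary)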
| dendisuhubdy/pytorch_geometric | Discriminator | false | 1,825 | [
"MIT"
] | 0 | a0592f61aef617c0c8ff61b3d822d04901054c22 | https://github.com/dendisuhubdy/pytorch_geometric/tree/a0592f61aef617c0c8ff61b3d822d04901054c22 | import math
import torch
import torch.nn as nn
import torch.utils.data
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
class Model(nn.Module):
def __init__(self, hidden_dim):
super().__init__()
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
self.reset_parameters()
def reset_parameters(self):
size = self.weight.size(0)
uniform(size, self.weight)
def forward(self, x, summary):
x = torch.matmul(x, torch.matmul(self.weight, summary))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
STFullyConnected | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/5y/c5ycegys2ar7wz6epy2shzbmz2mkdkn4qmpsdbgrzhhu5ufhemkx.py
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 8000
x1 = (xindex // 8000)
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x0 + (8064*x1)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2d/c2dldo522mnd74n46v2lrkgeutrjrvjind75q4sshb25ha2k2cdg.py
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = (xindex // 4000)
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x0 + (4096*x1)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4o/c4o2m7ozfavldjjmvy54d5j4mjtrlcvp7qonixpmh5h2yro5ycts.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# y_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = (xindex // 2000)
tmp0 = tl.load(in_out_ptr0 + (x0 + (2016*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + (2016*x1)), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (2048*x1)), tmp6, xmask)
''', device_str='cuda')
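# Editor's note (hedged): the three relu/threshold kernels above fuse the bias add with
# ReLU and also store the (activation <= 0) mask that the backward pass reuses. A compact
# eager reference of one such step, for comparison only:
def _reference_bias_relu(x, bias):
    y = torch.relu(x + bias)
    return y, y <= 0.0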
# kernel path: runs/run_shard_6/inductor_cache/ow/cowwhsc5cs2qznzwwjc3lfnep2kbdduqlytkv5dxjwuphmtmuvmg.py
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# y_3 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/u7/cu7ffzplxrjnumdhgwnhmfftk3s6t7gc5vwgldniafzjdoyhvfef.py
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# y_3 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
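# Editor's note (hedged): the two softmax kernels above implement a numerically stable
# softmax over dim=1 of the (4, 4, 4, 4) logits in two passes (shift by the per-position
# max and exponentiate, then normalise by the sum). Plain PyTorch reference, for comparison:
def _reference_softmax_dim1(logits):
    shifted = logits - logits.amax(dim=1, keepdim=True)
    exp = shifted.exp()
    return exp / exp.sum(dim=1, keepdim=True)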
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (8000, 4), (4, 1))
assert_size_stride(primals_2, (8000, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4000, 8000), (8000, 1))
assert_size_stride(primals_5, (4000, ), (1, ))
assert_size_stride(primals_6, (2000, 4000), (4000, 1))
assert_size_stride(primals_7, (2000, ), (1, ))
assert_size_stride(primals_8, (4, 2000), (2000, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000, 8000, 1), 0); del buf0 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1), torch.bool)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 512000, grid=grid(512000), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000, 4000, 1), 0); del buf2 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1), torch.bool)
# Topologically Sorted Source Nodes: [y_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf10, 256000, grid=grid(256000), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016, 1), 0); del buf4 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1), torch.bool)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf9, 128000, grid=grid(128000), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1, 2000), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [y_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf7, buf8, 256, grid=grid(256), stream=stream0)
del buf7
return (buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8000, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4000, 8000), (8000, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2000, 4000), (4000, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 2000), (2000, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating the model and predicting on the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is the same as that of train_loader.
out (str): the file path for the model file (suffix with '.pkg')
and log file (suffix with '.log').
epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
log = open(out + '.log', 'w')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self.forward(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss = self.criterion(y_, yb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
Return:
loss (float): the average loss value computed by the loss function on the given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss += self.criterion(y_, yb).data[0]
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
(m is the No. of sample, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
it is an m X l array (m is the No. of samples, l is the No. of classes or tasks).
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class STFullyConnected(Base):
"""Single task DNN classification/regression model. It contains four fully connected layers between which
are dropout layers for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_class (int): the No. of columns (classes) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_class, is_reg=False):
super(STFullyConnected, self).__init__()
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 8000)
self.fc1 = nn.Linear(8000, 4000)
self.fc2 = nn.Linear(4000, 2000)
self.fc3 = nn.Linear(2000, n_class)
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
elif n_class == 1:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
else:
self.criterion = nn.CrossEntropyLoss()
self.activation = nn.Softmax()
self
def forward(self, X, istrain=False):
"""Invoke the class directly as a function
Arguments:
X (FloatTensor): m X n FloatTensor, m is the No. of samples, n is the No. of features.
istrain (bool, optional): is it invoked during training process (True) or just for prediction (False)
Return:
y (FloatTensor): m X l FloatTensor, m is the No. of samples, l is the No. of classes
"""
y = F.relu(self.fc0(X))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc1(y))
if istrain:
y = self.dropout(y)
y = F.relu(self.fc2(y))
if istrain:
y = self.dropout(y)
if self.is_reg:
y = self.fc3(y)
else:
y = self.activation(self.fc3(y))
return y
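# Editor's note (hedged sketch): minimal eager instantiation of the classifier defined
# above; with n_class > 1 each output row is a Softmax distribution. Illustrative only.
def _example_st_fully_connected():
    net = STFullyConnected(n_dim=4, n_class=4)
    X = torch.rand(4, 4)
    return net(X)  # (4, 4) tensor of class probabilities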
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_dim': 4, 'n_class': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import time
import numpy as np
from torch import nn
from torch import optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 8000
x1 = xindex // 8000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 8064 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4000
x1 = xindex // 4000
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + (x0 + 4096 * x1), tmp6, None)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2000
x1 = xindex // 2000
tmp0 = tl.load(in_out_ptr0 + (x0 + 2016 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0 + 2016 * x1), tmp4, xmask)
tl.store(out_ptr0 + (x0 + 2048 * x1), tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (8000, 4), (4, 1))
assert_size_stride(primals_2, (8000,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4000, 8000), (8000, 1))
assert_size_stride(primals_5, (4000,), (1,))
assert_size_stride(primals_6, (2000, 4000), (4000, 1))
assert_size_stride(primals_7, (2000,), (1,))
assert_size_stride(primals_8, (4, 2000), (2000, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8000), (8000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 8000), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 8000), (128000, 32000,
8000, 1), 0)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 8000), (129024, 32256, 8064, 1
), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512000)](buf1,
primals_2, buf11, 512000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4000), (4000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0
), reinterpret_tensor(primals_4, (8000, 4000), (1, 8000), 0),
out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4000), (64000, 16000,
4000, 1), 0)
del buf2
buf10 = empty_strided_cuda((4, 4, 4, 4000), (65536, 16384, 4096, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(256000)](buf3,
primals_5, buf10, 256000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2000), (2016, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0
), reinterpret_tensor(primals_6, (4000, 2000), (1, 4000), 0),
out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 2000), (32256, 8064, 2016,
1), 0)
del buf4
buf9 = empty_strided_cuda((4, 4, 4, 2000), (32768, 8192, 2048, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(128000)](buf5,
primals_7, buf9, 128000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 2000),
(2016, 1), 0), reinterpret_tensor(primals_8, (2000, 4), (1,
2000), 0), alpha=1, beta=1, out=buf6)
del primals_9
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(256)](buf6, buf7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_4[grid(256)](buf7, buf8, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf7
return buf8, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 8000), (8000, 1), 0
), reinterpret_tensor(buf3, (64, 4000), (4000, 1), 0
), reinterpret_tensor(buf5, (64, 2000), (2016, 1), 0
), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating the model and predicting on the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
(m is the No. of sample, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
The data structure is the same as that of train_loader.
out (str): the file path for the model file (suffix with '.pkg')
and log file (suffix with '.log').
epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
log = open(out + '.log', 'w')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self.forward(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss = self.criterion(y_, yb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
Return:
            loss (float): the average loss value computed by the loss function on the given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss += self.criterion(y_, yb).data[0]
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
                (m is the No. of samples, n is the No. of features)
Return:
            score (ndarray): probability of each sample in the given dataset,
                it is an m X l FloatTensor (m is the No. of samples, l is the No. of classes or tasks).
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
score = torch.cat(score, dim=0).numpy()
return score
class STFullyConnectedNew(Base):
"""Single task DNN classification/regression model. It contains four fully connected layers between which
are dropout layer for robustness.
Arguments:
n_dim (int): the No. of columns (features) for input tensor
n_class (int): the No. of columns (classes) for output tensor.
is_reg (bool, optional): Regression model (True) or Classification model (False)
"""
def __init__(self, n_dim, n_class, is_reg=False):
super(STFullyConnectedNew, self).__init__()
self.dropout = nn.Dropout(0.25)
self.fc0 = nn.Linear(n_dim, 8000)
self.fc1 = nn.Linear(8000, 4000)
self.fc2 = nn.Linear(4000, 2000)
self.fc3 = nn.Linear(2000, n_class)
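        # Layer sizes: n_dim -> 8000 -> 4000 -> 2000 -> n_class; per the class docstring,
        # self.dropout (p=0.25) provides regularisation between these layers.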
self.is_reg = is_reg
if is_reg:
self.criterion = nn.MSELoss()
elif n_class == 1:
self.criterion = nn.BCELoss()
self.activation = nn.Sigmoid()
else:
self.criterion = nn.CrossEntropyLoss()
self.activation = nn.Softmax()
self
def forward(self, input_0):
primals_1 = self.fc0.weight
primals_2 = self.fc0.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
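# A minimal smoke-test sketch (commented out; it assumes a CUDA device, since the Triton
# kernels above are compiled for fixed (4, 4, 4, 4) inputs with n_dim=4 and n_class=4):
#   model = STFullyConnectedNew(n_dim=4, n_class=4).cuda()
#   y = model(torch.rand(4, 4, 4, 4, device='cuda'))   # softmax scores, shape (4, 4, 4, 4)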
| cthoyt/DrugEx | STFullyConnected | false | 1,826 | [
"MIT"
] | 0 | 9e4d31adb2c65d0afc852948f502c79dcf8308a3 | https://github.com/cthoyt/DrugEx/tree/9e4d31adb2c65d0afc852948f502c79dcf8308a3 | import time
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.nn import functional as F
class Base(nn.Module):
""" This class is the base structure for all of classification/regression DNN models.
Mainly, it provides the general methods for training, evaluating model and predcting the given data.
"""
def fit(self, train_loader, valid_loader, out, epochs=100, lr=0.0001):
"""Training the DNN model, similar to the scikit-learn or Keras style.
In the end, the optimal value of parameters will also be persisted on the hard drive.
Arguments:
train_loader (DataLoader): Data loader for training set,
including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
valid_loader (DataLoader): Data loader for validation set.
                The data structure is the same as that of train_loader.
out (str): the file path for the model file (suffix with '.pkg')
and log file (suffix with '.log').
            epochs (int, optional): The maximum number of training epochs (default: 100)
lr (float, optional): learning rate (default: 1e-4)
"""
if 'optim' in self.__dict__:
optimizer = self.optim
else:
optimizer = optim.Adam(self.parameters(), lr=lr)
best_loss = np.inf
last_save = 0
log = open(out + '.log', 'w')
for epoch in range(epochs):
time.time()
for param_group in optimizer.param_groups:
param_group['lr'] = lr * (1 - 1 / epochs) ** (epoch * 10)
for i, (Xb, yb) in enumerate(train_loader):
Xb, yb = Xb, yb
optimizer.zero_grad()
y_ = self.forward(Xb, istrain=True)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss = self.criterion(y_, yb)
loss.backward()
optimizer.step()
loss_valid = self.evaluate(valid_loader)
None
if loss_valid < best_loss:
torch.save(self.state_dict(), out + '.pkg')
None
best_loss = loss_valid
last_save = epoch
else:
None
if epoch - last_save > 100:
break
log.close()
self.load_state_dict(torch.load(out + '.pkg'))
def evaluate(self, loader):
"""Evaluating the performance of the DNN model.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
                including m X n target FloatTensor and m X l label FloatTensor
                (m is the No. of samples, n is the No. of features, l is the No. of classes or tasks)
Return:
            loss (float): the average loss value computed by the loss function on the given test set.
"""
loss = 0
for Xb, yb in loader:
Xb, yb = Xb, yb
y_ = self.forward(Xb)
ix = yb == yb
yb, y_ = yb[ix], y_[ix]
loss += self.criterion(y_, yb).data[0]
loss = loss / len(loader)
return loss
def predict(self, loader):
"""Predicting the probability of each sample in the given dataset.
Arguments:
loader (torch.utils.data.DataLoader): data loader for test set,
only including m X n target FloatTensor
                (m is the No. of samples, n is the No. of features)
Return:
score (ndarray): probability of each sample in the given dataset,
                it is an m X l FloatTensor (m is the No. of samples, l is the No. of classes or tasks).
"""
score = []
for Xb, yb in loader:
Xb = Xb
y_ = self.forward(Xb)
score.append(y_.detach().cpu())
# ... truncated (>4000 chars) for memory efficiency |
SoftDetectionModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/72/c72xsm4j2ydy65cd6s2dqcf73hjvkqbi2lraxbqgd3vljynafcgn.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
triton_per_fused_max_0 = async_compile.triton('triton_per_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yh/cyhmw3wd3ttwv3gwn5pdrawto4dhpm5r565oikeygp4rncuwxbr2.py
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd]
# Source node to ATen node mapping:
# batch => relu
# exp => exp
# pad => constant_pad_nd
# truediv => div
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {})
triton_poi_fused_constant_pad_nd_div_exp_relu_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_div_exp_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_div_exp_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x4 = (xindex // 36)
x3 = (xindex // 144)
x6 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x4)), tmp10 & xmask, other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + (x3), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 / tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tl.store(out_ptr0 + (x6), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/av/cavduzqn2kcecsivfbdoiquab64kxakphvxa2ir5glbkv2uxr56x.py
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# batch => relu
# exp => exp
# pad => constant_pad_nd
# truediv => div
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%exp, [1, 1, 1, 1], 1.0), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%constant_pad_nd, [3, 3], [1, 1]), kwargs = {})
triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2 = async_compile.triton('triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (6*x1) + (36*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (6*x1) + (36*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (6*x1) + (36*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + (6*x1) + (36*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + (6*x1) + (36*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + (6*x1) + (36*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + (6*x1) + (36*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + (6*x1) + (36*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + (6*x1) + (36*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/6v/c6v2dq4j27mrmzkudmd2l2ookfsao2o6sqtsagdumpcc33lwpchn.py
# Topologically Sorted Source Nodes: [batch, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, score_1], Original ATen: [aten.relu, aten.div, aten.exp, aten.mul, aten.max, aten.sum]
# Source node to ATen node mapping:
# all_scores => mul_1
# batch => relu
# depth_wise_max_score => div_2
# exp => exp
# local_max_score => div_1
# max_3 => max_3
# score_1 => div_3
# sum_1 => sum_1
# sum_exp => mul
# truediv => div
# Graph fragment:
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %view_1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%avg_pool2d, 9), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %mul), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu, %unsqueeze), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %div_2), kwargs = {})
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul_1, 1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_2, [1]), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%getitem_4, %view_3), kwargs = {})
triton_per_fused_div_exp_max_mul_relu_sum_3 = async_compile.triton('triton_per_fused_div_exp_max_mul_relu_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_max_mul_relu_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (r1 + (64*x0)), xmask, other=0.0)
tmp10 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp13 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp16 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp23 = tl.load(in_ptr2 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp31 = tl.load(in_ptr2 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp39 = tl.load(in_ptr2 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 / tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = 9.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 / tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = triton_helpers.maximum(tmp2, tmp11)
tmp14 = triton_helpers.maximum(tmp1, tmp13)
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = triton_helpers.maximum(tmp1, tmp16)
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp2 / tmp18
tmp20 = tmp9 * tmp19
tmp21 = tmp11 / tmp3
tmp22 = tl_math.exp(tmp21)
tmp24 = tmp23 * tmp7
tmp25 = tmp22 / tmp24
tmp26 = tmp11 / tmp18
tmp27 = tmp25 * tmp26
tmp28 = triton_helpers.maximum(tmp20, tmp27)
tmp29 = tmp14 / tmp3
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp31 * tmp7
tmp33 = tmp30 / tmp32
tmp34 = tmp14 / tmp18
tmp35 = tmp33 * tmp34
tmp36 = triton_helpers.maximum(tmp28, tmp35)
tmp37 = tmp17 / tmp3
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp39 * tmp7
tmp41 = tmp38 / tmp40
tmp42 = tmp17 / tmp18
tmp43 = tmp41 * tmp42
tmp44 = triton_helpers.maximum(tmp36, tmp43)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.where(xmask, tmp45, 0)
tmp48 = tl.sum(tmp47, 1)[:, None]
tmp49 = tmp44 / tmp48
tl.store(out_ptr2 + (r1 + (16*x0)), tmp49, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_per_fused_max_0.run(arg0_1, buf0, 4, 64, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_div_exp_relu_1.run(arg0_1, buf0, buf2, 576, grid=grid(576), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch, truediv, exp, pad, avg_pool2d], Original ATen: [aten.relu, aten.div, aten.exp, aten.constant_pad_nd, aten.avg_pool2d]
triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [batch, truediv, exp, sum_exp, local_max_score, depth_wise_max_score, all_scores, max_3, sum_1, score_1], Original ATen: [aten.relu, aten.div, aten.exp, aten.mul, aten.max, aten.sum]
triton_per_fused_div_exp_max_mul_relu_sum_3.run(arg0_1, buf0, buf3, buf6, 4, 16, grid=grid(4), stream=stream0)
del arg0_1
del buf0
del buf3
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class SoftDetectionModule(nn.Module):
def __init__(self, soft_local_max_size=3):
super(SoftDetectionModule, self).__init__()
self.soft_local_max_size = soft_local_max_size
self.pad = self.soft_local_max_size // 2
def forward(self, batch):
b = batch.size(0)
batch = F.relu(batch)
max_per_sample = torch.max(batch.view(b, -1), dim=1)[0]
exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1))
sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(F.pad(exp, [
self.pad] * 4, mode='constant', value=1.0), self.
soft_local_max_size, stride=1)
local_max_score = exp / sum_exp
depth_wise_max = torch.max(batch, dim=1)[0]
depth_wise_max_score = batch / depth_wise_max.unsqueeze(1)
all_scores = local_max_score * depth_wise_max_score
score = torch.max(all_scores, dim=1)[0]
score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1)
return score
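        # In words: each location's score is exp(x / max) divided by the sum of exp over its
        # padded 3x3 neighbourhood, times the ratio of its activation to the channel-wise
        # maximum at that pixel; the result is maxed over channels and normalised to sum to 1
        # over the H x W grid of each sample.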
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_max_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_div_exp_relu_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x4 = xindex // 36
x3 = xindex // 144
x6 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x4), tmp10 & xmask,
other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.load(in_ptr1 + x3, tmp10 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tmp13 / tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tl.full(tmp16.shape, 1.0, tmp16.dtype)
tmp18 = tl.where(tmp10, tmp16, tmp17)
tl.store(out_ptr0 + x6, tmp18, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 6 * x1 + 36 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 6 * x1 + 36 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 6 * x1 + 36 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (6 + x0 + 6 * x1 + 36 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (7 + x0 + 6 * x1 + 36 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (8 + x0 + 6 * x1 + 36 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (12 + x0 + 6 * x1 + 36 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (13 + x0 + 6 * x1 + 36 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (14 + x0 + 6 * x1 + 36 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_per_fused_div_exp_max_mul_relu_sum_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (r1 + 64 * x0), xmask, other=0.0)
tmp10 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp13 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp16 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp23 = tl.load(in_ptr2 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp31 = tl.load(in_ptr2 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp39 = tl.load(in_ptr2 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = tmp2 / tmp3
tmp5 = tl_math.exp(tmp4)
tmp7 = 9.0
tmp8 = tmp6 * tmp7
tmp9 = tmp5 / tmp8
tmp11 = triton_helpers.maximum(tmp1, tmp10)
tmp12 = triton_helpers.maximum(tmp2, tmp11)
tmp14 = triton_helpers.maximum(tmp1, tmp13)
tmp15 = triton_helpers.maximum(tmp12, tmp14)
tmp17 = triton_helpers.maximum(tmp1, tmp16)
tmp18 = triton_helpers.maximum(tmp15, tmp17)
tmp19 = tmp2 / tmp18
tmp20 = tmp9 * tmp19
tmp21 = tmp11 / tmp3
tmp22 = tl_math.exp(tmp21)
tmp24 = tmp23 * tmp7
tmp25 = tmp22 / tmp24
tmp26 = tmp11 / tmp18
tmp27 = tmp25 * tmp26
tmp28 = triton_helpers.maximum(tmp20, tmp27)
tmp29 = tmp14 / tmp3
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp31 * tmp7
tmp33 = tmp30 / tmp32
tmp34 = tmp14 / tmp18
tmp35 = tmp33 * tmp34
tmp36 = triton_helpers.maximum(tmp28, tmp35)
tmp37 = tmp17 / tmp3
tmp38 = tl_math.exp(tmp37)
tmp40 = tmp39 * tmp7
tmp41 = tmp38 / tmp40
tmp42 = tmp17 / tmp18
tmp43 = tmp41 * tmp42
tmp44 = triton_helpers.maximum(tmp36, tmp43)
tmp45 = tl.broadcast_to(tmp44, [XBLOCK, RBLOCK])
tmp47 = tl.where(xmask, tmp45, 0)
tmp48 = tl.sum(tmp47, 1)[:, None]
tmp49 = tmp44 / tmp48
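    # tmp48 is the spatial sum of the per-location scores for this sample, so tmp49 is the
    # score normalised to sum to 1 over the 4x4 grid (matching the final division in forward).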
tl.store(out_ptr2 + (r1 + 16 * x0), tmp49, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_max_0[grid(4)](arg0_1, buf0, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_div_exp_relu_1[grid(576)](arg0_1,
buf0, buf2, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_avg_pool2d_constant_pad_nd_div_exp_relu_2[grid(256)](
buf2, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf2
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused_div_exp_max_mul_relu_sum_3[grid(4)](arg0_1, buf0,
buf3, buf6, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del buf0
del buf3
return buf6,
class SoftDetectionModuleNew(nn.Module):
def __init__(self, soft_local_max_size=3):
super(SoftDetectionModuleNew, self).__init__()
self.soft_local_max_size = soft_local_max_size
self.pad = self.soft_local_max_size // 2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| deep-learning-20/d2-net | SoftDetectionModule | false | 1,827 | [
"BSD-3-Clause-Clear"
] | 0 | b092186353af23e9247c7f56ac2de3396b8c5a00 | https://github.com/deep-learning-20/d2-net/tree/b092186353af23e9247c7f56ac2de3396b8c5a00 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, soft_local_max_size=3):
super().__init__()
self.soft_local_max_size = soft_local_max_size
self.pad = self.soft_local_max_size // 2
def forward(self, batch):
b = batch.size(0)
batch = F.relu(batch)
max_per_sample = torch.max(batch.view(b, -1), dim=1)[0]
exp = torch.exp(batch / max_per_sample.view(b, 1, 1, 1))
sum_exp = self.soft_local_max_size ** 2 * F.avg_pool2d(F.pad(exp, [
self.pad] * 4, mode='constant', value=1.0), self.
soft_local_max_size, stride=1)
local_max_score = exp / sum_exp
depth_wise_max = torch.max(batch, dim=1)[0]
depth_wise_max_score = batch / depth_wise_max.unsqueeze(1)
all_scores = local_max_score * depth_wise_max_score
score = torch.max(all_scores, dim=1)[0]
score = score / torch.sum(score.view(b, -1), dim=1).view(b, 1, 1)
return score
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
AdaptiveAvgPool3dOutSize1 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/tu/ctuej2j6f3oxr5p43q7juhagc3r3ncgs2ikvxemutunlnxlnvl24.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool3d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool3d => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2, -3], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [adaptive_avg_pool3d], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, arg0_1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
from abc import abstractmethod
from typing import Tuple
import torch.nn
class EfficientBlockBase(nn.Module):
"""
PyTorchVideo/accelerator provides a set of efficient blocks
that have optimal efficiency for each target hardware device.
Each efficient block has two forms:
    - original form: this form is for training. When an efficient block is instantiated,
      it is in this original form.
    - deployable form: this form is for deployment. Once the network is ready for
      deployment, it can be converted into deployable form for efficient execution
      on target hardware. A block is transformed into deployable form by calling the
      convert() method. During conversion to deployable form,
      various optimizations (operator fusion, kernel optimization, etc.) are applied.
    EfficientBlockBase is the base class for efficient blocks.
    All efficient blocks should inherit this base class
    and implement the following methods:
- forward(): same as required by nn.Module
- convert(): called to convert block into deployable form
"""
@abstractmethod
def convert(self):
pass
@abstractmethod
def forward(self):
pass
class AdaptiveAvgPool3dOutSize1(EfficientBlockBase):
"""
Implements AdaptiveAvgPool3d with output (T, H, W) = (1, 1, 1). This operator has
better efficiency than AdaptiveAvgPool for mobile CPU.
"""
def __init__(self):
super().__init__()
self.pool = nn.AdaptiveAvgPool3d(1)
self.convert_flag = False
def convert(self, input_blob_size: 'Tuple', **kwargs):
"""
Converts AdaptiveAvgPool into AvgPool with constant kernel size for better
efficiency.
Args:
input_blob_size (tuple): blob size at the input of
AdaptiveAvgPool3dOutSize1 instance during forward.
kwargs (any): any keyword argument (unused).
"""
assert self.convert_flag is False, 'AdaptiveAvgPool3dOutSize1: already converted, cannot be converted again'
kernel_size = input_blob_size[2:]
self.pool = nn.AvgPool3d(kernel_size)
self.convert_flag = True
def forward(self, x):
return self.pool(x)
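# A minimal conversion sketch (commented out; the blob size below is an assumed example,
# not taken from this file):
#   pool = AdaptiveAvgPool3dOutSize1()
#   pool.convert(input_blob_size=(1, 16, 4, 7, 7))   # kernel size becomes (4, 7, 7)
#   y = pool(torch.rand(1, 16, 4, 7, 7))             # y.shape == (1, 16, 1, 1, 1)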
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
from abc import abstractmethod
from typing import Tuple
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(4)](buf1, arg0_1, 4, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class EfficientBlockBase(nn.Module):
"""
PyTorchVideo/accelerator provides a set of efficient blocks
that have optimal efficiency for each target hardware device.
Each efficient block has two forms:
    - original form: this form is for training. When an efficient block is instantiated,
      it is in this original form.
    - deployable form: this form is for deployment. Once the network is ready for
      deployment, it can be converted into deployable form for efficient execution
      on target hardware. A block is transformed into deployable form by calling the
      convert() method. During conversion to deployable form,
      various optimizations (operator fusion, kernel optimization, etc.) are applied.
    EfficientBlockBase is the base class for efficient blocks.
    All efficient blocks should inherit this base class
    and implement the following methods:
- forward(): same as required by nn.Module
- convert(): called to convert block into deployable form
"""
@abstractmethod
def convert(self):
pass
@abstractmethod
def forward(self):
pass
class AdaptiveAvgPool3dOutSize1New(EfficientBlockBase):
"""
Implements AdaptiveAvgPool3d with output (T, H, W) = (1, 1, 1). This operator has
better efficiency than AdaptiveAvgPool for mobile CPU.
"""
def __init__(self):
super().__init__()
self.pool = nn.AdaptiveAvgPool3d(1)
self.convert_flag = False
def convert(self, input_blob_size: 'Tuple', **kwargs):
"""
Converts AdaptiveAvgPool into AvgPool with constant kernel size for better
efficiency.
Args:
input_blob_size (tuple): blob size at the input of
AdaptiveAvgPool3dOutSize1 instance during forward.
kwargs (any): any keyword argument (unused).
"""
assert self.convert_flag is False, 'AdaptiveAvgPool3dOutSize1: already converted, cannot be converted again'
kernel_size = input_blob_size[2:]
self.pool = nn.AvgPool3d(kernel_size)
self.convert_flag = True
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| denred0/pytorchvideo | AdaptiveAvgPool3dOutSize1 | false | 1,828 | [
"Apache-2.0"
] | 0 | d874bfc9969895d2afcedea2e12bae5e1bcfb809 | https://github.com/denred0/pytorchvideo/tree/d874bfc9969895d2afcedea2e12bae5e1bcfb809 | import torch
import torch.nn as nn
import torch.utils.data
from abc import abstractmethod
from typing import Tuple
import torch.nn
class EfficientBlockBase(nn.Module):
"""
PyTorchVideo/accelerator provides a set of efficient blocks
that have optimal efficiency for each target hardware device.
Each efficient block has two forms:
    - original form: this form is for training. When an efficient block is instantiated,
      it is in this original form.
    - deployable form: this form is for deployment. Once the network is ready for
      deployment, it can be converted into deployable form for efficient execution
      on target hardware. A block is transformed into deployable form by calling the
      convert() method. During conversion to deployable form,
      various optimizations (operator fusion, kernel optimization, etc.) are applied.
    EfficientBlockBase is the base class for efficient blocks.
    All efficient blocks should inherit this base class
    and implement the following methods:
- forward(): same as required by nn.Module
- convert(): called to convert block into deployable form
"""
@abstractmethod
def convert(self):
pass
@abstractmethod
def forward(self):
pass
class Model(EfficientBlockBase):
"""
Implements AdaptiveAvgPool3d with output (T, H, W) = (1, 1, 1). This operator has
better efficiency than AdaptiveAvgPool for mobile CPU.
"""
def __init__(self):
super().__init__()
self.pool = nn.AdaptiveAvgPool3d(1)
self.convert_flag = False
def convert(self, input_blob_size: 'Tuple', **kwargs):
"""
Converts AdaptiveAvgPool into AvgPool with constant kernel size for better
efficiency.
Args:
input_blob_size (tuple): blob size at the input of
AdaptiveAvgPool3dOutSize1 instance during forward.
kwargs (any): any keyword argument (unused).
"""
assert self.convert_flag is False, 'AdaptiveAvgPool3dOutSize1: already converted, cannot be converted again'
kernel_size = input_blob_size[2:]
self.pool = nn.AvgPool3d(kernel_size)
self.convert_flag = True
def forward(self, x):
return self.pool(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6m/c6mdw2kaeasua3ylbmacjpovwvq4tt7thqvrmfiugf5ozgxx7vya.py
# Topologically Sorted Source Nodes: [ge, casted_condition, mul_1, sub, elu, mul, mul_2, output, mul_3], Original ATen: [aten.ge, aten._to_copy, aten.mul, aten.rsub, aten.elu, aten.add]
# Source node to ATen node mapping:
# casted_condition => convert_element_type
# elu => expm1, gt, mul, mul_1, mul_2, where
# ge => ge
# mul => mul_3
# mul_1 => mul_4
# mul_2 => mul_5
# mul_3 => mul_6
# output => add
# sub => sub
# Graph fragment:
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 0), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ge, torch.float32), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.6732632423543772), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mul_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 1.0507009873554805), kwargs = {})
triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0 = async_compile.triton('triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp3 * tmp0
tmp5 = 1.0
tmp6 = tmp5 - tmp3
tmp7 = tmp0 > tmp1
tmp8 = tmp0 * tmp5
tmp9 = libdevice.expm1(tmp8)
tmp10 = tmp9 * tmp5
tmp11 = tl.where(tmp7, tmp8, tmp10)
tmp12 = 1.6732632423543772
tmp13 = tmp11 * tmp12
tmp14 = tmp6 * tmp13
tmp15 = tmp4 + tmp14
tmp16 = 1.0507009873554805
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ge, casted_condition, mul_1, sub, elu, mul, mul_2, output, mul_3], Original ATen: [aten.ge, aten._to_copy, aten.mul, aten.rsub, aten.elu, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
def where(condition, if_true, if_false):
"""
Torch equivalent of numpy.where.
Parameters
----------
condition : torch.ByteTensor or torch.cuda.ByteTensor or torch.autograd.Variable
Condition to check.
if_true : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
Output value if condition is true.
if_false: torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
Output value if condition is false
Returns
-------
torch.Tensor
Raises
------
AssertionError
if if_true and if_false are not both variables or both tensors.
AssertionError
if if_true and if_false don't have the same datatype.
"""
if isinstance(if_true, Variable) or isinstance(if_false, Variable):
assert isinstance(condition, Variable
), 'Condition must be a variable if either if_true or if_false is a variable.'
        assert isinstance(if_true, Variable) and isinstance(if_false, Variable
), 'Both if_true and if_false must be variables if either is one.'
assert if_true.data.type() == if_false.data.type(
), 'Type mismatch: {} and {}'.format(if_true.data.type(),
if_false.data.type())
else:
assert not isinstance(condition, Variable
), 'Condition must not be a variable because neither if_true nor if_false is one.'
assert if_true.type() == if_false.type(
), 'Type mismatch: {} and {}'.format(if_true.data.type(),
if_false.data.type())
casted_condition = condition.type_as(if_true)
output = casted_condition * if_true + (1 - casted_condition) * if_false
return output
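# Example with assumed values: for x = torch.tensor([-1., 2.]) and condition x >= 0,
# where(x >= 0, x, 0.5 * x) gives tensor([-0.5, 2.0]); the false branch contributes
# only where the condition is 0.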
class SELU(nn.Module):
def forward(self, input):
return self.selu(input)
@staticmethod
def selu(x):
alpha = 1.6732632423543772
scale = 1.0507009873554805
return scale * where(x >= 0, x, alpha * F.elu(x))
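        # Sanity check: for x > 0 this reduces to scale * x, while for large negative x it
        # saturates at -scale * alpha, about -1.7581, matching torch.nn.SELU.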
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 >= tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp3 * tmp0
tmp5 = 1.0
tmp6 = tmp5 - tmp3
tmp7 = tmp0 > tmp1
tmp8 = tmp0 * tmp5
tmp9 = libdevice.expm1(tmp8)
tmp10 = tmp9 * tmp5
tmp11 = tl.where(tmp7, tmp8, tmp10)
tmp12 = 1.6732632423543772
tmp13 = tmp11 * tmp12
tmp14 = tmp6 * tmp13
tmp15 = tmp4 + tmp14
tmp16 = 1.0507009873554805
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_add_elu_ge_mul_rsub_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def where(condition, if_true, if_false):
"""
Torch equivalent of numpy.where.
Parameters
----------
condition : torch.ByteTensor or torch.cuda.ByteTensor or torch.autograd.Variable
Condition to check.
if_true : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
Output value if condition is true.
if_false: torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
Output value if condition is false
Returns
-------
torch.Tensor
Raises
------
AssertionError
if if_true and if_false are not both variables or both tensors.
AssertionError
if if_true and if_false don't have the same datatype.
"""
if isinstance(if_true, Variable) or isinstance(if_false, Variable):
assert isinstance(condition, Variable
), 'Condition must be a variable if either if_true or if_false is a variable.'
        assert isinstance(if_true, Variable) and isinstance(if_false, Variable
), 'Both if_true and if_false must be variables if either is one.'
assert if_true.data.type() == if_false.data.type(
), 'Type mismatch: {} and {}'.format(if_true.data.type(),
if_false.data.type())
else:
assert not isinstance(condition, Variable
), 'Condition must not be a variable because neither if_true nor if_false is one.'
assert if_true.type() == if_false.type(
), 'Type mismatch: {} and {}'.format(if_true.data.type(),
if_false.data.type())
casted_condition = condition.type_as(if_true)
output = casted_condition * if_true + (1 - casted_condition) * if_false
return output
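# Illustrative note (not part of the generated file; the helper name is ours):
# for plain tensors the arithmetic blend above, cond * a + (1 - cond) * b,
# selects a where cond is true and b elsewhere, matching torch.where. The
# helper below is a sketch and is never called.
def _where_matches_torch_where():
    a, b = torch.randn(8), torch.randn(8)
    cond = a > 0
    return torch.allclose(where(cond, a, b), torch.where(cond, a, b))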
class SELUNew(nn.Module):
@staticmethod
def selu(x):
alpha = 1.6732632423543772
scale = 1.0507009873554805
return scale * where(x >= 0, x, alpha * F.elu(x))
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
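# Illustrative comparison (not part of the generated file; the helper name and
# tolerance are ours): the Triton path wrapped by SELUNew should reproduce the
# eager formula it was compiled from. Assumes a CUDA device and is never called.
def _triton_selu_matches_eager(atol=1e-05):
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = 1.0507009873554805 * where(x >= 0, x, 1.6732632423543772 * F.elu(x))
    return torch.allclose(SELUNew()(x), eager, atol=atol)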
| dearkafka/inferno | SELU | false | 1,829 | [
"Apache-2.0"
] | 0 | e9e3b863fd1fc97cf94d08ac6b4f8df7665f996a | https://github.com/dearkafka/inferno/tree/e9e3b863fd1fc97cf94d08ac6b4f8df7665f996a | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
def where(condition, if_true, if_false):
"""
Torch equivalent of numpy.where.
Parameters
----------
condition : torch.ByteTensor or torch.cuda.ByteTensor or torch.autograd.Variable
Condition to check.
if_true : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
Output value if condition is true.
    if_false : torch.Tensor or torch.cuda.Tensor or torch.autograd.Variable
        Output value if condition is false.
Returns
-------
torch.Tensor
Raises
------
AssertionError
if if_true and if_false are not both variables or both tensors.
AssertionError
if if_true and if_false don't have the same datatype.
"""
if isinstance(if_true, Variable) or isinstance(if_false, Variable):
assert isinstance(condition, Variable
), 'Condition must be a variable if either if_true or if_false is a variable.'
        assert isinstance(if_true, Variable) and isinstance(if_false, Variable
), 'Both if_true and if_false must be variables if either is one.'
assert if_true.data.type() == if_false.data.type(
), 'Type mismatch: {} and {}'.format(if_true.data.type(),
if_false.data.type())
else:
assert not isinstance(condition, Variable
), 'Condition must not be a variable because neither if_true nor if_false is one.'
assert if_true.type() == if_false.type(
), 'Type mismatch: {} and {}'.format(if_true.data.type(),
if_false.data.type())
casted_condition = condition.type_as(if_true)
output = casted_condition * if_true + (1 - casted_condition) * if_false
return output
class Model(nn.Module):
def forward(self, input):
return self.selu(input)
@staticmethod
def selu(x):
alpha = 1.6732632423543772
scale = 1.0507009873554805
return scale * where(x >= 0, x, alpha * F.elu(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BatchDHCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nn/cnnz7j7bvq4khlqkkeiwlx23s64rkadxb6sq4zk77hz6ujcjlnrw.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = (yindex // 2)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (2*x2) + (18*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/p7/cp7ejdryfxgctra2x2hn5zril5d6o2omn2dstqiqvvizs2zmhe4e.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 2, 2, 0], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_1 = async_compile.triton('triton_poi_fused_constant_pad_nd_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = (xindex // 6)
x2 = xindex % 6
y4 = yindex
x5 = xindex
y0 = yindex % 2
y1 = (yindex // 2)
tmp0 = (-2) + x3
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = x2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((-8) + x2 + (4*x3) + (16*y4)), tmp6 & xmask & ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (y0 + (2*x5) + (72*y1)), tmp7, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/og/cogrelgx5xzmxfvrys7mrvsgxrpbnucpasz36qfgeyxjons5mqay.py
# Topologically Sorted Source Nodes: [x_conv_1, x_conv_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_conv_1 => convolution
# x_conv_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = (yindex // 512)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (512*x2) + (8192*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (512*x2) + (8192*y1)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (512, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_3, (512, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((512, 2, 3, 3), (18, 1, 6, 2), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_2, buf0, 1024, 9, grid=grid(1024, 9), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_1.run(primals_1, buf1, 8, 36, grid=grid(8, 36), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_conv_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.bool)
# Topologically Sorted Source Nodes: [x_conv_1, x_conv_2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_2.run(buf2, primals_3, buf3, buf4, 2048, 16, grid=grid(2048, 16), stream=stream0)
del buf2
del primals_3
return (buf3, buf0, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((512, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import torch.utils.data
import torch.optim
class BatchDHCN(nn.Module):
"""docstring for BatchDHCN"""
def __init__(self, embed_size=512, output_size=512, num_channel=2,
conv_size=3, batch_norm=True):
super(BatchDHCN, self).__init__()
self.batch_norm = batch_norm
self.embed_size = embed_size
self.output_size = output_size
self.num_channel = num_channel
self.padding = nn.ZeroPad2d((0, conv_size - 1, conv_size - 1, 0))
self.conv_1 = nn.Conv2d(self.num_channel, self.output_size, (
conv_size, conv_size))
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x_conv_1 = self.conv_1(self.padding(x))
x_conv_1 = F.relu(x_conv_1)
return x_conv_1
def get_inputs():
return [torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
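# Illustrative shape check (not part of the original module; the helper name is
# ours): ZeroPad2d((0, conv_size - 1, conv_size - 1, 0)) pads only the right
# and top edges, so the conv_size x conv_size convolution that follows keeps
# the spatial size. Never called.
def _batchdhcn_keeps_spatial_size():
    block = BatchDHCN()
    out = block(torch.rand(4, 2, 4, 4))
    return out.shape == (4, 512, 4, 4)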
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils
import torch.utils.data
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2
y1 = yindex // 2
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 2 * x2 + 18 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x3 = xindex // 6
x2 = xindex % 6
y4 = yindex
x5 = xindex
y0 = yindex % 2
y1 = yindex // 2
tmp0 = -2 + x3
tmp1 = tl.full([1, 1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = x2
tmp4 = tl.full([1, 1], 4, tl.int64)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (-8 + x2 + 4 * x3 + 16 * y4), tmp6 & xmask &
ymask, eviction_policy='evict_last', other=0.0)
tl.store(out_ptr0 + (y0 + 2 * x5 + 72 * y1), tmp7, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 512
y1 = yindex // 512
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 512 * x2 + 8192 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 512 * x2 + 8192 * y1), tmp6, xmask)
def call(args):
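    # The steps below: (1) re-lay-out the conv weight into a channels-last
    # style buffer, (2) materialise the zero-padded input, (3) run the extern
    # convolution, and (4) apply a fused bias + ReLU kernel that also records
    # the <= 0 mask used by the backward pass.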
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (512, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_3, (512,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((512, 2, 3, 3), (18, 1, 6, 2), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 9)](primals_2, buf0, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 2, 6, 6), (72, 1, 12, 2), torch.float32)
triton_poi_fused_constant_pad_nd_1[grid(8, 36)](primals_1, buf1, 8,
36, XBLOCK=32, YBLOCK=8, num_warps=4, num_stages=1)
del primals_1
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 512, 4, 4), (8192, 1, 2048, 512))
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
buf4 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(2048, 16)](
buf2, primals_3, buf3, buf4, 2048, 16, XBLOCK=16, YBLOCK=64,
num_warps=4, num_stages=1)
del buf2
del primals_3
return buf3, buf0, buf1, buf4
class BatchDHCNNew(nn.Module):
"""docstring for BatchDHCN"""
def __init__(self, embed_size=512, output_size=512, num_channel=2,
conv_size=3, batch_norm=True):
super(BatchDHCNNew, self).__init__()
self.batch_norm = batch_norm
self.embed_size = embed_size
self.output_size = output_size
self.num_channel = num_channel
self.padding = nn.ZeroPad2d((0, conv_size - 1, conv_size - 1, 0))
self.conv_1 = nn.Conv2d(self.num_channel, self.output_size, (
conv_size, conv_size))
self.dropout = nn.Dropout(p=0.5)
def forward(self, input_0):
primals_2 = self.conv_1.weight
primals_3 = self.conv_1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| deeplearning2020/self | BatchDHCN | false | 1,830 | [
"MIT"
] | 0 | cf0e6f9acdcfe17906c6327042d25ac9c8894885 | https://github.com/deeplearning2020/self/tree/cf0e6f9acdcfe17906c6327042d25ac9c8894885 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils
import torch.utils.data
import torch.optim
class Model(nn.Module):
"""docstring for BatchDHCN"""
def __init__(self, embed_size=512, output_size=512, num_channel=2,
conv_size=3, batch_norm=True):
super().__init__()
self.batch_norm = batch_norm
self.embed_size = embed_size
self.output_size = output_size
self.num_channel = num_channel
self.padding = nn.ZeroPad2d((0, conv_size - 1, conv_size - 1, 0))
self.conv_1 = nn.Conv2d(self.num_channel, self.output_size, (
conv_size, conv_size))
self.dropout = nn.Dropout(p=0.5)
def forward(self, x):
x_conv_1 = self.conv_1(self.padding(x))
x_conv_1 = F.relu(x_conv_1)
return x_conv_1
def get_inputs():
return [torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return []
|
DenseSAGEConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/j2/cj2d3c6gtln25wc32cnx4g72nkvrjiaxot4s7c34z5fmjt3bit4r.py
# Topologically Sorted Source Nodes: [sum_1, out_1], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# out_1 => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_2, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_2, %sum_1), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gb/cgb4g77efcirrss3hx2wxyo3key6qfe3eufz6rkhiaw24s3l6c6l.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min]
# Source node to ATen node mapping:
# out_3 => add
# out_4 => clamp_min, pow_1, pow_2, sum_2
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-12), kwargs = {})
triton_poi_fused_add_clamp_min_linalg_vector_norm_1 = async_compile.triton('triton_poi_fused_add_clamp_min_linalg_vector_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_min_linalg_vector_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_min_linalg_vector_norm_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (1))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2))
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3))
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 + tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 + tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 + tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-12
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tl.store(out_ptr0 + (x0), tmp25, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sd/csdjtdcl5il2uqr5omeysz44w3vimeurcs7qmifysr33dbijdjj2.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# out_3 => add
# out_4 => div_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %primals_4), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %expand_2), kwargs = {})
triton_poi_fused_add_div_2 = async_compile.triton('triton_poi_fused_add_div_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_1, out_1], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.linalg_vector_norm, aten.clamp_min]
triton_poi_fused_add_clamp_min_linalg_vector_norm_1.run(buf2, primals_4, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.div]
triton_poi_fused_add_div_2.run(buf2, primals_4, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
return (buf4, primals_4, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.utils.data
from torch.nn import Parameter
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
class DenseSAGEConv(torch.nn.Module):
def __init__(self, in_channels, out_channels, norm=True, norm_embed=
True, bias=True):
super(DenseSAGEConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.norm = norm
self.norm_embed = norm_embed
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def forward(self, x, adj):
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
out = torch.matmul(adj, x)
if self.norm:
out = out / adj.sum(dim=-1, keepdim=True)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.norm_embed:
out = F.normalize(out, p=2, dim=-1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
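# Illustrative check (not part of the original module; the helper name is
# ours): dividing adj @ x by adj.sum(dim=-1, keepdim=True) is the same as
# aggregating with a row-normalised adjacency, i.e. a mean over each node's
# neighbours. Never called.
def _mean_aggregation_equivalence():
    adj = torch.rand(4, 4)
    x = torch.rand(4, 8)
    lhs = torch.matmul(adj, x) / adj.sum(dim=-1, keepdim=True)
    rhs = torch.matmul(adj / adj.sum(dim=-1, keepdim=True), x)
    return torch.allclose(lhs, rhs)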
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.utils.data
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_min_linalg_vector_norm_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + 1)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + 2)
tmp13 = tl.broadcast_to(tmp12, [XBLOCK])
tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + 3)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tmp3 * tmp3
tmp8 = tmp5 + tmp7
tmp9 = tmp8 * tmp8
tmp10 = tmp4 + tmp9
tmp14 = tmp11 + tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp10 + tmp15
tmp20 = tmp17 + tmp19
tmp21 = tmp20 * tmp20
tmp22 = tmp16 + tmp21
tmp23 = libdevice.sqrt(tmp22)
tmp24 = 1e-12
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tl.store(out_ptr0 + x0, tmp25, xmask)
@triton.jit
def triton_poi_fused_add_div_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(256)](buf1, primals_2, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
primals_3, out=buf2)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_clamp_min_linalg_vector_norm_1[grid(64)](buf2,
primals_4, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_2[grid(256)](buf2, primals_4, buf3, buf4,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
return buf4, primals_4, buf2, reinterpret_tensor(buf1, (4, 64), (1, 4), 0)
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
class DenseSAGEConvNew(torch.nn.Module):
def __init__(self, in_channels, out_channels, norm=True, norm_embed=
True, bias=True):
super(DenseSAGEConvNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.norm = norm
self.norm_embed = norm_embed
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_4 = self.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| dendisuhubdy/pytorch_geometric | DenseSAGEConv | false | 1,831 | [
"MIT"
] | 0 | a0592f61aef617c0c8ff61b3d822d04901054c22 | https://github.com/dendisuhubdy/pytorch_geometric/tree/a0592f61aef617c0c8ff61b3d822d04901054c22 | import math
import torch
import torch.nn.functional as F
import torch.utils.data
from torch.nn import Parameter
def uniform(size, tensor):
stdv = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-stdv, stdv)
class Model(torch.nn.Module):
def __init__(self, in_channels, out_channels, norm=True, norm_embed=
True, bias=True):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.norm = norm
self.norm_embed = norm_embed
self.weight = Parameter(torch.Tensor(self.in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
uniform(self.in_channels, self.weight)
uniform(self.in_channels, self.bias)
def forward(self, x, adj):
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
out = torch.matmul(adj, x)
if self.norm:
out = out / adj.sum(dim=-1, keepdim=True)
out = torch.matmul(out, self.weight)
if self.bias is not None:
out = out + self.bias
if self.norm_embed:
out = F.normalize(out, p=2, dim=-1)
return out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.
in_channels, self.out_channels)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MaskedMSE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/lc/clcjwavbfgr6c56ur4yy3tcfbja6veotp77jfx2d7wive37vnxp4.py
# Topologically Sorted Source Nodes: [mul, add, mask, mul_1, mul_2, mse_loss], Original ATen: [aten.mul, aten.add, aten.div, aten.mse_loss]
# Source node to ATen node mapping:
# add => add
# mask => div
# mse_loss => mean, pow_1, sub
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1e-07), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %div), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1e-07
tmp5 = tmp1 + tmp4
tmp6 = tmp3 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tmp1 * tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, add, mask, mul_1, mul_2, mse_loss], Original ATen: [aten.mul, aten.add, aten.div, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaskedMSE(nn.Module):
def __init__(self):
super(MaskedMSE, self).__init__()
self.criterion = nn.MSELoss()
def forward(self, input, target, gamma=2.0):
mask = gamma * target / (target + 1e-07)
self.loss = self.criterion(input * mask, target * mask)
return self.loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
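# Illustrative demo (not part of the original module; the helper name is ours):
# the mask gamma * target / (target + 1e-07) is ~0 where target == 0 and
# ~gamma elsewhere, so zero-valued targets are effectively dropped from the
# MSE while the remaining terms are weighted by roughly gamma ** 2. Never called.
def _masked_mse_ignores_zero_targets():
    loss_fn = MaskedMSE()
    pred = torch.tensor([[1.0, 5.0]])
    target = torch.tensor([[1.0, 0.0]])
    return loss_fn(pred, target).item() < 1e-06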
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1e-07
tmp5 = tmp1 + tmp4
tmp6 = tmp3 / tmp5
tmp7 = tmp0 * tmp6
tmp8 = tmp1 * tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MaskedMSENew(nn.Module):
def __init__(self):
super(MaskedMSENew, self).__init__()
self.criterion = nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dhruvramani/AccentTransfer | MaskedMSE | false | 1,832 | [
"MIT"
] | 0 | 63a35b4aa37bc41c1f66dfb4bae76e2924183d7c | https://github.com/dhruvramani/AccentTransfer/tree/63a35b4aa37bc41c1f66dfb4bae76e2924183d7c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.criterion = nn.MSELoss()
def forward(self, input, target, gamma=2.0):
mask = gamma * target / (target + 1e-07)
self.loss = self.criterion(input * mask, target * mask)
return self.loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
adaILN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/um/cumfxxnuff67646tii7qigo5xu4wp3dwgamqagptzcoe6havnpgp.py
# Topologically Sorted Source Nodes: [ln_mean, ln_var, add_1, sqrt_1], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add_1 => add_1
# ln_mean => mean_1
# ln_var => var_1
# sqrt_1 => sqrt_1
# Graph fragment:
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1, 2, 3], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 1, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var_1, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
triton_per_fused_add_mean_sqrt_var_0 = async_compile.triton('triton_per_fused_add_mean_sqrt_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_sqrt_var_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
''', device_str='cuda')
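# Illustrative reference (not part of the generated file; the function name is
# ours): the kernel above reduces each sample over its C, H, W elements and
# produces the layer-norm statistics consumed by the next kernel, roughly:
def _ln_stats_reference(x, eps=1e-05):
    ln_mean = x.mean(dim=[1, 2, 3], keepdim=True)
    ln_std = (x.var(dim=[1, 2, 3], unbiased=True, keepdim=True) + eps).sqrt()
    return ln_mean, ln_std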
# kernel path: runs/run_shard_6/inductor_cache/3n/c3ndqo7nij3t5k6ubr2vtyqzix6kntc5ziep6mlsj6ad5b7y77jj.py
# Topologically Sorted Source Nodes: [in_mean, in_var, sub, add, sqrt, out_in, sub_1, out_ln, mul, sub_2, mul_1, out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add => add
# in_mean => mean
# in_var => var
# mul => mul
# mul_1 => mul_1
# out => add_2
# out_in => div
# out_ln => div_1
# sqrt => sqrt
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [2, 3]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp25, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp37, xmask)
''', device_str='cuda')
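# Reference sketch (assumption, for readability only): eager equivalent of the fusion above.
# It computes instance-norm statistics per (batch, channel) over the 16 spatial positions, then
# the rho-blended mix of the instance- and layer-normalized activations written to out_ptr0.
def _eager_adailn_mix_sketch(x, rho, ln_mean, ln_std, eps=1e-05):
    in_mean = x.mean(dim=[2, 3], keepdim=True)
    in_std = torch.sqrt(x.var(dim=[2, 3], keepdim=True) + eps)
    out_in = (x - in_mean) / in_std
    out_ln = (x - ln_mean) / ln_std
    return rho * out_in + (1 - rho) * out_ln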
# kernel path: runs/run_shard_6/inductor_cache/bh/cbhkkkfowmcaigx6sts2oovquwlb7qm6qdlrdzh26rfoghoreml6.py
# Topologically Sorted Source Nodes: [sub, out_in, sub_1, out_ln, mul, sub_2, mul_1, out, mul_2, out_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# out => add_2
# out_1 => add_3
# out_in => div
# out_ln => div_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %div), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %expand), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %div_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, %unsqueeze_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %unsqueeze_3), kwargs = {})
triton_poi_fused_add_div_mul_rsub_sub_2 = async_compile.triton('triton_poi_fused_add_div_mul_rsub_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_rsub_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x0 = xindex % 16
x2 = (xindex // 256)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x4), tmp4, None)
''', device_str='cuda')
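# Reference sketch (assumption): the pointwise kernel above applies the final affine step
# out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3); with the 4-D gamma/beta
# used by get_inputs() this broadcast produces the 6-D buffer allocated in call() below.
def _eager_affine_sketch(out, gamma, beta):
    return out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)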
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf6 # reuse
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [ln_mean, ln_var, add_1, sqrt_1], Original ATen: [aten.mean, aten.var, aten.add, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0.run(buf7, buf11, primals_1, 4, 64, grid=grid(4), stream=stream0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf3 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [in_mean, in_var, sub, add, sqrt, out_in, sub_1, out_ln, mul, sub_2, mul_1, out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div, aten.mul, aten.rsub]
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1.run(buf1, buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, grid=grid(16), stream=stream0)
del primals_2
buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, out_in, sub_1, out_ln, mul, sub_2, mul_1, out, mul_2, out_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_div_mul_rsub_sub_2.run(buf12, primals_3, primals_4, buf13, 4096, grid=grid(4096), stream=stream0)
del buf12
del primals_4
return (buf13, primals_1, primals_3, buf1, buf5, buf7, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class adaILN(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(adaILN, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True
), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True
), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 -
self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2
).unsqueeze(3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_mean_sqrt_var_0(in_out_ptr0, in_out_ptr1, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp4 / tmp19
tmp21 = 63.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1(in_out_ptr0,
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
x3 = xindex // 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp4 / tmp19
tmp21 = 15.0
tmp22 = tmp18 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp27 = tmp0 - tmp20
tmp28 = tmp27 / tmp25
tmp29 = tmp26 * tmp28
tmp30 = 1.0
tmp31 = tmp30 - tmp26
tmp33 = tmp0 - tmp32
tmp35 = tmp33 / tmp34
tmp36 = tmp31 * tmp35
tmp37 = tmp29 + tmp36
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp20, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp25, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp37, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex % 256
x0 = xindex % 16
x2 = xindex // 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x4, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf6 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf9 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf6
buf11 = reinterpret_tensor(buf9, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf9
get_raw_stream(0)
triton_per_fused_add_mean_sqrt_var_0[grid(4)](buf7, buf11,
primals_1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf3
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused_add_div_mean_mul_rsub_sqrt_sub_var_1[grid(16)](buf1,
buf5, primals_1, primals_2, buf7, buf11, buf12, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del primals_2
buf13 = empty_strided_cuda((4, 4, 4, 4, 4, 4), (1024, 256, 64, 16,
4, 1), torch.float32)
triton_poi_fused_add_div_mul_rsub_sub_2[grid(4096)](buf12,
primals_3, primals_4, buf13, 4096, XBLOCK=256, num_warps=4,
num_stages=1)
del buf12
del primals_4
return buf13, primals_1, primals_3, buf1, buf5, buf7, buf11
class adaILNNew(nn.Module):
def __init__(self, num_features, eps=1e-05):
super(adaILNNew, self).__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input_0, input_1, input_2):
primals_2 = self.rho
primals_1 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
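# Verification sketch (assumption: a CUDA device is available; the tolerance is illustrative).
# It compares the compiled wrapper above against a hand-written eager computation of adaILN.
def _check_adailn_new_sketch():
    x, gamma, beta = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
    mod = adaILNNew(4).cuda()
    got = mod(x, gamma, beta)
    out_in = (x - x.mean([2, 3], keepdim=True)) / torch.sqrt(x.var([2, 3], keepdim=True) + mod.eps)
    out_ln = (x - x.mean([1, 2, 3], keepdim=True)) / torch.sqrt(x.var([1, 2, 3], keepdim=True) + mod.eps)
    want = mod.rho * out_in + (1 - mod.rho) * out_ln
    want = want * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)
    return torch.allclose(got, want, atol=1e-05)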
| denny3388/Conditional-UGATIT | adaILN | false | 1,833 | [
"MIT"
] | 0 | 86ad35f05aaa105a814dec031d37370f44b71d5b | https://github.com/denny3388/Conditional-UGATIT/tree/86ad35f05aaa105a814dec031d37370f44b71d5b | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, num_features, eps=1e-05):
super().__init__()
self.eps = eps
self.rho = Parameter(torch.Tensor(1, num_features, 1, 1))
self.rho.data.fill_(0.9)
def forward(self, input, gamma, beta):
in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True
), torch.var(input, dim=[2, 3], keepdim=True)
out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True
), torch.var(input, dim=[1, 2, 3], keepdim=True)
out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
out = self.rho.expand(input.shape[0], -1, -1, -1) * out_in + (1 -
self.rho.expand(input.shape[0], -1, -1, -1)) * out_ln
out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2
).unsqueeze(3)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4]
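# Usage sketch (assumption: CPU eager execution of the module defined above; shapes follow
# get_inputs() and get_init_inputs()).
if __name__ == '__main__':
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    # Broadcasting against the 4-D gamma/beta from get_inputs() yields a 6-D result here.
    print(out.shape)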
|
MaskedTemporalPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/7l/c7lvig2zhj2iehtujvqrkzrlwayqdobfawdm2rzdswlxaokpz36v.py
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem => full_default_1, index_put
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%arg0_1, [%full_default], %full_default_1), kwargs = {})
triton_poi_fused_index_put_lift_fresh_0 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], False, tl.int1)
tmp2 = float("-inf")
tmp3 = tl.where(tmp1, tmp2, tmp0)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kd/ckdjqudl6o2qnmvu7ql4ktibvn34h6thuyobkjyytvrpbzhegrn5.py
# Topologically Sorted Source Nodes: [setitem_1], Original ATen: [aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# setitem_1 => full_default_3, index_put_1
# Graph fragment:
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put_1 : [num_users=2] = call_function[target=torch.ops.aten.index_put_.default](args = (%index_put, [%bitwise_not_1], %full_default_3), kwargs = {})
triton_poi_fused_index_put_lift_fresh_1 = async_compile.triton('triton_poi_fused_index_put_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_put_lift_fresh_1', 'mutated_arg_names': ['in_ptr0', 'out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + (x0), xmask)
tmp0 = tl.full([1], True, tl.int1)
tmp1 = tmp0 | tmp0
tmp2 = tmp1 | tmp0
tmp3 = tmp2 | tmp0
tmp4 = tmp3 == 0
tmp6 = 0.0
tmp7 = tl.where(tmp4, tmp6, tmp5)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5l/c5lg2i2ofxflslf7bu6w7i56fb72x6ezjgb35d2woe4gnj5ur6ao.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_2 = async_compile.triton('triton_poi_fused_max_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
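# Reference sketch (assumption): the kernel above is the unrolled maximum over the temporal
# dimension (dim=1) of a contiguous (4, 4, 4) tensor, i.e. torch.max(x, dim=1)[0].
def _eager_temporal_max_sketch(x):
    return torch.max(x, dim=1)[0]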
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [setitem], Original ATen: [aten.lift_fresh, aten.index_put]
stream0 = get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0.run(arg0_1, arg0_1, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [setitem_1], Original ATen: [aten.lift_fresh, aten.index_put]
triton_poi_fused_index_put_lift_fresh_1.run(arg0_1, arg0_1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
triton_poi_fused_max_2.run(arg0_1, buf2, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from typing import Optional
import torch.nn
class MaskedTemporalPooling(torch.nn.Module):
"""
Applies temporal pooling operations on masked inputs. For each pooling operation
all masked values are ignored.
"""
def __init__(self, method: 'str'):
"""
method (str): the method of pooling to use. Options:
'max': reduces temporal dimension to each valid max value.
'avg': averages valid values in the temporal dimension.
'sum': sums valid values in the temporal dimension.
Note if all batch row elements are invalid, the temporal dimension is
pooled to 0 values.
"""
super().__init__()
assert method in ('max', 'avg', 'sum')
self._method = method
def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None
) ->torch.Tensor:
"""
Args:
x (torch.Tensor): tensor with shape (batch_size, seq_len, feature_dim)
mask (torch.Tensor): bool tensor with shape (batch_size, seq_len).
Sequence elements that are False are invalid.
Returns:
Tensor with shape (batch_size, feature_dim)
"""
assert x.dim(
) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)'
b, t = x.shape[0], x.shape[1]
if mask is None:
mask = torch.ones((b, t), dtype=torch.bool)
if self._method == 'max':
x[~mask, :] = float('-inf')
invalid_first_dim = ~mask.view(b, -1).any(dim=-1)
x[invalid_first_dim, :] = 0
x = torch.max(x, dim=1)[0]
elif self._method == 'avg':
x = x * mask.unsqueeze(-1).float()
mask = mask.view(b, t, -1).any(dim=-1)
valid_lengths = mask.float().sum(dim=-1).int()
x = x.sum(dim=1)
x = x.div(valid_lengths.clamp(min=1).unsqueeze(-1).expand(x.
size()).float())
elif self._method == 'sum':
x = x * mask.unsqueeze(-1).float()
x = x.sum(dim=1)
else:
raise NotImplementedError(
f"{self._method} not available options are: 'max', 'avg', 'sum'"
)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'method': 'max'}]
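# Usage sketch (assumption: CPU eager execution; the mask values are illustrative only).
if __name__ == '__main__':
    pool = MaskedTemporalPooling('max')
    x = torch.rand(2, 3, 5)
    mask = torch.tensor([[True, True, False], [True, False, False]])
    # Masked time steps are ignored; the result has shape (batch_size, feature_dim).
    print(pool(x, mask).shape)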
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_put_lift_fresh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], False, tl.int1)
tmp2 = float('-inf')
tmp3 = tl.where(tmp1, tmp2, tmp0)
tl.store(out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_index_put_lift_fresh_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp5 = tl.load(in_ptr0 + x0, xmask)
tmp0 = tl.full([1], True, tl.int1)
tmp1 = tmp0 | tmp0
tmp2 = tmp1 | tmp0
tmp3 = tmp2 | tmp0
tmp4 = tmp3 == 0
tmp6 = 0.0
tmp7 = tl.where(tmp4, tmp6, tmp5)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_max_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
get_raw_stream(0)
triton_poi_fused_index_put_lift_fresh_0[grid(64)](arg0_1, arg0_1,
64, XBLOCK=64, num_warps=1, num_stages=1)
triton_poi_fused_index_put_lift_fresh_1[grid(64)](arg0_1, arg0_1,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_max_2[grid(16)](arg0_1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf2,
class MaskedTemporalPoolingNew(torch.nn.Module):
"""
Applies temporal pooling operations on masked inputs. For each pooling operation
all masked values are ignored.
"""
def __init__(self, method: 'str'):
"""
method (str): the method of pooling to use. Options:
'max': reduces temporal dimension to each valid max value.
'avg': averages valid values in the temporal dimension.
'sum': sums valid values in the temporal dimension.
Note if all batch row elements are invalid, the temporal dimension is
pooled to 0 values.
"""
super().__init__()
assert method in ('max', 'avg', 'sum')
self._method = method
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
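# Comparison sketch (assumption: a CUDA device is available). With mask=None the compiled path
# above reduces to a plain max over dim=1; call() writes into its input buffer, so a clone is
# passed defensively.
def _check_masked_max_sketch():
    x = torch.rand(4, 4, 4, device='cuda')
    got = MaskedTemporalPoolingNew('max')(x.clone())
    want = torch.max(x, dim=1)[0]
    return torch.allclose(got, want)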
| denred0/pytorchvideo | MaskedTemporalPooling | false | 1,834 | [
"Apache-2.0"
] | 0 | d874bfc9969895d2afcedea2e12bae5e1bcfb809 | https://github.com/denred0/pytorchvideo/tree/d874bfc9969895d2afcedea2e12bae5e1bcfb809 | import torch
import torch.utils.data
from typing import Optional
import torch.nn
class Model(torch.nn.Module):
"""
Applies temporal pooling operations on masked inputs. For each pooling operation
all masked values are ignored.
"""
def __init__(self, method: 'str'):
"""
method (str): the method of pooling to use. Options:
'max': reduces temporal dimension to each valid max value.
'avg': averages valid values in the temporal dimension.
'sum': sums valid values in the temporal dimension.
Note if all batch row elements are invalid, the temporal dimension is
pooled to 0 values.
"""
super().__init__()
assert method in ('max', 'avg', 'sum')
self._method = method
def forward(self, x: 'torch.Tensor', mask: 'Optional[torch.Tensor]'=None
) ->torch.Tensor:
"""
Args:
x (torch.Tensor): tensor with shape (batch_size, seq_len, feature_dim)
mask (torch.Tensor): bool tensor with shape (batch_size, seq_len).
Sequence elements that are False are invalid.
Returns:
Tensor with shape (batch_size, feature_dim)
"""
assert x.dim(
) == 3, 'Requires x shape (batch_size x seq_len x feature_dim)'
b, t = x.shape[0], x.shape[1]
if mask is None:
mask = torch.ones((b, t), dtype=torch.bool)
if self._method == 'max':
x[~mask, :] = float('-inf')
invalid_first_dim = ~mask.view(b, -1).any(dim=-1)
x[invalid_first_dim, :] = 0
x = torch.max(x, dim=1)[0]
elif self._method == 'avg':
x = x * mask.unsqueeze(-1).float()
mask = mask.view(b, t, -1).any(dim=-1)
valid_lengths = mask.float().sum(dim=-1).int()
x = x.sum(dim=1)
x = x.div(valid_lengths.clamp(min=1).unsqueeze(-1).expand(x.
size()).float())
elif self._method == 'sum':
x = x * mask.unsqueeze(-1).float()
x = x.sum(dim=1)
else:
raise NotImplementedError(
f"{self._method} not available options are: 'max', 'avg', 'sum'"
)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
    return ['max']
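# Usage sketch (assumption: CPU eager execution; illustrates the 'avg' branch with a partial mask).
if __name__ == '__main__':
    pool = Model('avg')
    x = torch.ones(1, 4, 2)
    mask = torch.tensor([[True, True, False, False]])
    # Only the two valid time steps are averaged, giving tensor([[1., 1.]]).
    print(pool(x, mask))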
|
TemporalConvNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vw/cvwzmrf7u7j4rr5x5bjsxorzvl7rkn2io72j5uk3mjnapf4ukldf.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_per_fused__weight_norm_interface_0 = async_compile.triton('triton_per_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp9, xmask)
''', device_str='cuda')
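# Reference sketch (assumption): the kernel above is the weight-norm reparameterisation
# w = g * v / ||v||, with the norm taken per output channel over dims (1, 2) of a (4, 4, 4)
# direction tensor v and a (4, 1, 1) magnitude g.
def _eager_weight_norm_sketch(v, g):
    norm = v.pow(2).sum(dim=(1, 2), keepdim=True).sqrt()
    return v * (g / norm)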
# kernel path: runs/run_shard_6/inductor_cache/us/cuszefbp54tio7ijawigk6x3kiozi24lvkniflharoogdunet6gz.py
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.clone, aten.leaky_relu]
# Source node to ATen node mapping:
# input_2 => clone
# input_3 => gt, mul_1, where
# Graph fragment:
# %clone : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_3,), kwargs = {memory_format: torch.contiguous_format})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %clone, %mul_1), kwargs = {})
triton_poi_fused_clone_leaky_relu_1 = async_compile.triton('triton_poi_fused_clone_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (7*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gu/cguj5hgyett7b56my2fou2ckvi5c4ddzlxuljqxslhtwaynrxcrm.py
# Topologically Sorted Source Nodes: [input_6, input_7, add, input_9], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
# Source node to ATen node mapping:
# add => add
# input_6 => clone_2
# input_7 => gt_1, mul_3, where_1
# input_9 => gt_2, mul_4, where_2
# Graph fragment:
# %clone_2 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone_2, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_2, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %clone_2, %mul_3), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %primals_4), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.01), kwargs = {})
# %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add, %mul_4), kwargs = {})
triton_poi_fused_add_clone_leaky_relu_2 = async_compile.triton('triton_poi_fused_add_clone_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (7*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x4), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp10, xmask)
tl.store(out_ptr2 + (x4), tmp12, xmask)
''', device_str='cuda')
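# Reference sketch (assumption): eager equivalent of the fusion above for the first residual
# block: chomp the causal padding back to the input length, add the conv bias, apply LeakyReLU
# (slope 0.01), add the residual, then apply LeakyReLU again.
def _eager_chomp_residual_sketch(conv_out, bias, residual, chomp=3):
    y = conv_out[:, :, :-chomp] + bias.view(1, -1, 1)
    y = torch.nn.functional.leaky_relu(y)
    return torch.nn.functional.leaky_relu(y + residual)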
# kernel path: runs/run_shard_6/inductor_cache/cz/cczquphr6vfu577chuf3a6d6vxnh2bs46exrbr43au2fvdk53xeh.py
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.clone, aten.leaky_relu]
# Source node to ATen node mapping:
# input_11 => clone_4
# input_12 => gt_3, mul_6, where_3
# Graph fragment:
# %clone_4 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_9,), kwargs = {memory_format: torch.contiguous_format})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone_4, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_4, 0.01), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %clone_4, %mul_6), kwargs = {})
triton_poi_fused_clone_leaky_relu_3 = async_compile.triton('triton_poi_fused_clone_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (10*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3l/c3lnniuqhb77nsmkmcq6xxcu2aykk5aops2bzcw6zp6jbb4vhypx.py
# Topologically Sorted Source Nodes: [input_15, input_16, add_1, input_18], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# input_15 => clone_6
# input_16 => gt_4, mul_8, where_4
# input_18 => gt_5, mul_9, where_5
# Graph fragment:
# %clone_6 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_12,), kwargs = {memory_format: torch.contiguous_format})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone_6, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_6, 0.01), kwargs = {})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %clone_6, %mul_8), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %where_2), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.01), kwargs = {})
# %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %add_1, %mul_9), kwargs = {})
triton_poi_fused_add_clone_leaky_relu_4 = async_compile.triton('triton_poi_fused_add_clone_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (10*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x4), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp10, xmask)
tl.store(out_ptr2 + (x4), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_per_fused__weight_norm_interface_0.run(buf1, primals_2, primals_1, buf2, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 7), (28, 7, 1))
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.clone, aten.leaky_relu]
triton_poi_fused_clone_leaky_relu_1.run(buf3, primals_3, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf3
del primals_3
buf6 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1), (1, 1, 1), 0); del buf6 # reuse
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_0.run(buf7, primals_6, primals_5, buf8, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf5, buf8, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 7), (28, 7, 1))
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_6, input_7, add, input_9], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
triton_poi_fused_add_clone_leaky_relu_2.run(buf9, primals_7, primals_4, buf10, buf11, buf12, 64, grid=grid(64), stream=stream0)
del buf9
del primals_7
buf13 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf14 = reinterpret_tensor(buf13, (4, 1, 1), (1, 1, 1), 0); del buf13 # reuse
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_0.run(buf14, primals_9, primals_8, buf15, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf12, buf15, stride=(1,), padding=(6,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 10), (40, 10, 1))
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.clone, aten.leaky_relu]
triton_poi_fused_clone_leaky_relu_3.run(buf16, primals_10, buf17, buf18, 64, grid=grid(64), stream=stream0)
del buf16
del primals_10
buf19 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf20 = reinterpret_tensor(buf19, (4, 1, 1), (1, 1, 1), 0); del buf19 # reuse
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_0.run(buf20, primals_12, primals_11, buf21, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_14], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf18, buf21, stride=(1,), padding=(6,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 10), (40, 10, 1))
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_15, input_16, add_1, input_18], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
triton_poi_fused_add_clone_leaky_relu_4.run(buf22, primals_13, buf12, buf23, buf24, buf25, 64, grid=grid(64), stream=stream0)
del buf22
del primals_13
return (buf25, buf2, buf8, buf15, buf21, primals_1, primals_2, primals_4, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x, pad_right=True):
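        # Drop the extra padded timesteps (trailing when pad_right, else leading) so the causal conv output matches the input length.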
return x[:, :, :-self.chomp_size].contiguous() if pad_right else x[
:, :, self.chomp_size:]
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
padding, dropout):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
kernel_size, stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.
dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size, stride, dropout):
super(TemporalConvNet, self).__init__()
self.network = nn.Sequential()
for i, nch in enumerate(num_channels):
dilation = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
padding = (kernel_size - 1) * dilation
self.network.add_module('tblock' + str(i), TemporalBlock(
n_inputs=in_channels, n_outputs=out_channels, kernel_size=
kernel_size, stride=stride, dilation=dilation, padding=
padding, dropout=dropout))
def forward(self, x):
return self.network(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_channels': [4, 4], 'kernel_size': 4,
'stride': 1, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
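    # Weight normalization: for each of the 4 output channels, compute the L2 norm of its 16 direction weights v and write w = v * (g / ||v||).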
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_clone_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
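    # Fused Chomp1d + bias + LeakyReLU: read the first 4 of the 7 padded conv outputs per row (x0 + 7 * x3), add the channel bias, apply LeakyReLU(0.01), and store both the > 0 mask (for backward) and the activation.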
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 7 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
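    # Same as kernel 1 plus the residual connection: chomp + bias + LeakyReLU, add the block input (in_ptr2), then apply the block's output LeakyReLU; both boolean masks are kept for backward.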
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 7 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x4, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp10, xmask)
tl.store(out_ptr2 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused_clone_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
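    # Same fused chomp + bias + LeakyReLU as kernel 1, but the dilated conv output here has length 10 (x0 + 10 * x3).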
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 10 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
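    # Same as kernel 2 for the second TemporalBlock: chomp the length-10 dilated conv output, add bias, LeakyReLU, add the residual (the first block's output), and apply the final LeakyReLU.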
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 10 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x4, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp10, xmask)
tl.store(out_ptr2 + x4, tmp12, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
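        # Two TemporalBlocks back to back: normalize each conv weight, run the causal Conv1d via extern_kernels.convolution, then let the fused Triton kernels apply chomp + bias + LeakyReLU (and the residual add).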
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_2,
primals_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = extern_kernels.convolution(primals_4, buf2, stride=(1,),
padding=(3,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 7), (28, 7, 1))
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_leaky_relu_1[grid(64)](buf3, primals_3, buf4,
buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del primals_3
buf6 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 1, 1), (1, 1, 1), 0)
del buf6
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_0[grid(4)](buf7, primals_6,
primals_5, buf8, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf9 = extern_kernels.convolution(buf5, buf8, stride=(1,), padding=
(3,), dilation=(1,), transposed=False, output_padding=(0,),
groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 7), (28, 7, 1))
buf10 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clone_leaky_relu_2[grid(64)](buf9, primals_7,
primals_4, buf10, buf11, buf12, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf9
del primals_7
buf13 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf14 = reinterpret_tensor(buf13, (4, 1, 1), (1, 1, 1), 0)
del buf13
buf15 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_0[grid(4)](buf14, primals_9,
primals_8, buf15, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf16 = extern_kernels.convolution(buf12, buf15, stride=(1,),
padding=(6,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf16, (4, 4, 10), (40, 10, 1))
buf17 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_leaky_relu_3[grid(64)](buf16, primals_10,
buf17, buf18, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf16
del primals_10
buf19 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf20 = reinterpret_tensor(buf19, (4, 1, 1), (1, 1, 1), 0)
del buf19
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_0[grid(4)](buf20,
primals_12, primals_11, buf21, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf22 = extern_kernels.convolution(buf18, buf21, stride=(1,),
padding=(6,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf22, (4, 4, 10), (40, 10, 1))
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clone_leaky_relu_4[grid(64)](buf22, primals_13,
buf12, buf23, buf24, buf25, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf22
del primals_13
return (buf25, buf2, buf8, buf15, buf21, primals_1, primals_2,
primals_4, primals_5, primals_6, primals_8, primals_9, primals_11,
primals_12, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, buf12,
buf14, buf15, buf17, buf18, buf20, buf21, buf23, buf24)
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x, pad_right=True):
return x[:, :, :-self.chomp_size].contiguous() if pad_right else x[
:, :, self.chomp_size:]
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
padding, dropout):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
kernel_size, stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.
dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNetNew(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size, stride, dropout):
super(TemporalConvNetNew, self).__init__()
self.network = nn.Sequential()
for i, nch in enumerate(num_channels):
dilation = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
padding = (kernel_size - 1) * dilation
self.network.add_module('tblock' + str(i), TemporalBlock(
n_inputs=in_channels, n_outputs=out_channels, kernel_size=
kernel_size, stride=stride, dilation=dilation, padding=
padding, dropout=dropout))
def forward(self, input_0):
primals_3 = self.network.tblock0.conv1.bias
primals_1 = self.network.tblock0.conv1.weight_g
primals_2 = self.network.tblock0.conv1.weight_v
primals_7 = self.network.tblock0.conv2.bias
primals_5 = self.network.tblock0.conv2.weight_g
primals_4 = self.network.tblock0.conv2.weight_v
primals_10 = self.network.tblock1.conv1.bias
primals_8 = self.network.tblock1.conv1.weight_g
primals_6 = self.network.tblock1.conv1.weight_v
primals_13 = self.network.tblock1.conv2.bias
primals_11 = self.network.tblock1.conv2.weight_g
primals_9 = self.network.tblock1.conv2.weight_v
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| ddcas/singing-language-identification | TemporalConvNet | false | 1,835 | [
"MIT"
] | 0 | d104419b196d56d4de37cff47c32e88e28c58690 | https://github.com/ddcas/singing-language-identification/tree/d104419b196d56d4de37cff47c32e88e28c58690 | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super().__init__()
self.chomp_size = chomp_size
def forward(self, x, pad_right=True):
return x[:, :, :-self.chomp_size].contiguous() if pad_right else x[
:, :, self.chomp_size:]
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
padding, dropout):
super().__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
kernel_size, stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.
dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class Model(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size, stride, dropout):
super().__init__()
self.network = nn.Sequential()
for i, nch in enumerate(num_channels):
dilation = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
padding = (kernel_size - 1) * dilation
self.network.add_module('tblock' + str(i), TemporalBlock(
n_inputs=in_channels, n_outputs=out_channels, kernel_size=
kernel_size, stride=stride, dilation=dilation, padding=
padding, dropout=dropout))
def forward(self, x):
return self.network(x)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'num_channels': [4, 4], 'kernel_size': 4,
'stride': 1, 'dropout': 0.5}]
|
NaiveTorchNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/6q/c6qtcqlhzp2spzvdv6knpwm32alhfras7qga7rybepicf6poy4sy.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x => sigmoid
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_0.run(buf3, 256, grid=grid(256), stream=stream0)
return (buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf1, buf3, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.autograd
import torch.optim as optim
class NaiveTorchNet(nn.Module):
"""A reimplementation of from-scratch NaiveNet using PyTorch"""
def __init__(self, input_nodes, hidden_nodes, output_nodes, learn_rate=0.1
):
super().__init__()
self.hidden = nn.Linear(input_nodes, hidden_nodes, bias=False)
self.output = nn.Linear(hidden_nodes, output_nodes, bias=False)
self.lr = learn_rate
self.activation_function = nn.Sigmoid()
self.optimizer = optim.SGD(self.parameters(), lr=learn_rate)
self.loss_function = nn.MSELoss()
def forward(self, x):
"""Overrides the built in"""
x = self.activation_function(self.hidden(x))
x = self.activation_function(self.output(x))
return x
def query(self, inputs):
"""Takes an input to the net and returns an output via forward computation"""
if type(inputs) != torch.autograd.variable.Variable:
inputs = Variable(torch.Tensor(inputs))
return {'i': inputs, 'fo': self.forward(inputs)}
def learn(self, targets, input_layers):
if type(targets) != torch.autograd.variable.Variable:
targets = Variable(torch.Tensor(targets))
final_outputs = input_layers['fo']
output_errors = self.loss_function(final_outputs, targets)
self.optimizer.zero_grad()
output_errors.backward()
self.optimizer.step()
return output_errors, final_outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nodes': 4, 'hidden_nodes': 4, 'output_nodes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Variable
import torch.nn as nn
import torch.autograd
import torch.optim as optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
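    # Elementwise in-place sigmoid over the flattened activations.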
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
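        # Hidden layer: flatten the input to (64, 4) and compute x @ W_hidden.T; the sigmoid kernel is then applied in place.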
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_sigmoid_0[grid(256)](buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf1, buf3, primals_3
class NaiveTorchNetNew(nn.Module):
"""A reimplementation of from-scratch NaiveNet using PyTorch"""
def __init__(self, input_nodes, hidden_nodes, output_nodes, learn_rate=0.1
):
super().__init__()
self.hidden = nn.Linear(input_nodes, hidden_nodes, bias=False)
self.output = nn.Linear(hidden_nodes, output_nodes, bias=False)
self.lr = learn_rate
self.activation_function = nn.Sigmoid()
self.optimizer = optim.SGD(self.parameters(), lr=learn_rate)
self.loss_function = nn.MSELoss()
def query(self, inputs):
"""Takes an input to the net and returns an output via forward computation"""
if type(inputs) != torch.autograd.variable.Variable:
inputs = Variable(torch.Tensor(inputs))
return {'i': inputs, 'fo': self.forward(inputs)}
def learn(self, targets, input_layers):
if type(targets) != torch.autograd.variable.Variable:
targets = Variable(torch.Tensor(targets))
final_outputs = input_layers['fo']
output_errors = self.loss_function(final_outputs, targets)
self.optimizer.zero_grad()
output_errors.backward()
self.optimizer.step()
return output_errors, final_outputs
def forward(self, input_0):
primals_1 = self.hidden.weight
primals_3 = self.output.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| deo1/deo1 | NaiveTorchNet | false | 1,836 | [
"MIT"
] | 0 | 36671f12269d3bd662d746e8b9f66c22255c9df7 | https://github.com/deo1/deo1/tree/36671f12269d3bd662d746e8b9f66c22255c9df7 | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.autograd
import torch.optim as optim
class Model(nn.Module):
"""A reimplementation of from-scratch NaiveNet using PyTorch"""
def __init__(self, input_nodes, hidden_nodes, output_nodes, learn_rate=0.1
):
super().__init__()
self.hidden = nn.Linear(input_nodes, hidden_nodes, bias=False)
self.output = nn.Linear(hidden_nodes, output_nodes, bias=False)
self.lr = learn_rate
self.activation_function = nn.Sigmoid()
self.optimizer = optim.SGD(self.parameters(), lr=learn_rate)
self.loss_function = nn.MSELoss()
def forward(self, x):
"""Overrides the built in"""
x = self.activation_function(self.hidden(x))
x = self.activation_function(self.output(x))
return x
def query(self, inputs):
"""Takes an input to the net and returns an output via forward computation"""
if type(inputs) != torch.autograd.variable.Variable:
inputs = Variable(torch.Tensor(inputs))
return {'i': inputs, 'fo': self.forward(inputs)}
def learn(self, targets, input_layers):
if type(targets) != torch.autograd.variable.Variable:
targets = Variable(torch.Tensor(targets))
final_outputs = input_layers['fo']
output_errors = self.loss_function(final_outputs, targets)
self.optimizer.zero_grad()
output_errors.backward()
self.optimizer.step()
return output_errors, final_outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Attn | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Attn(nn.Module):
def __init__(self, hidden_size, batch_size=1, method='dot'):
super(Attn, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.batch_size = batch_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size, bias=False)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size, bias=False
)
self.v = nn.Parameter(torch.FloatTensor(batch_size, 1, hidden_size)
)
def forward(self, hidden, encoder_outputs):
attn_energies = self.score(hidden, encoder_outputs)
return F.softmax(attn_energies, dim=2)
def score(self, hidden, encoder_output):
if self.method == 'general':
energy = self.attn(encoder_output)
energy = energy.transpose(2, 1)
energy = hidden.bmm(energy)
return energy
elif self.method == 'concat':
hidden = hidden * encoder_output.new_ones(encoder_output.size())
energy = self.attn(torch.cat((hidden, encoder_output), -1))
energy = energy.transpose(2, 1)
energy = self.v.bmm(energy)
return energy
else:
encoder_output = encoder_output.transpose(2, 1)
energy = hidden.bmm(encoder_output)
return energy
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
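    # Numerically stable softmax, step 1: subtract the per-row maximum over the last dimension (size 4) and exponentiate.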
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
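    # Softmax, step 2: divide each exponentiated value by its row sum.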
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
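        # Raw attention energies via batched matmul (dot-product scoring); the softmax kernels below normalize them over the last dimension.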
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
return buf2,
class AttnNew(nn.Module):
def __init__(self, hidden_size, batch_size=1, method='dot'):
super(AttnNew, self).__init__()
self.method = method
self.hidden_size = hidden_size
self.batch_size = batch_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size, bias=False)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size, bias=False
)
self.v = nn.Parameter(torch.FloatTensor(batch_size, 1, hidden_size)
)
def score(self, hidden, encoder_output):
if self.method == 'general':
energy = self.attn(encoder_output)
energy = energy.transpose(2, 1)
energy = hidden.bmm(energy)
return energy
elif self.method == 'concat':
hidden = hidden * encoder_output.new_ones(encoder_output.size())
energy = self.attn(torch.cat((hidden, encoder_output), -1))
energy = energy.transpose(2, 1)
energy = self.v.bmm(energy)
return energy
else:
encoder_output = encoder_output.transpose(2, 1)
energy = hidden.bmm(encoder_output)
return energy
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dhpollack/mgc | Attn | false | 1,837 | [
"MIT"
] | 0 | ed1b8fb512f0b42cb8121a2809def65f232dc154 | https://github.com/dhpollack/mgc/tree/ed1b8fb512f0b42cb8121a2809def65f232dc154 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size, batch_size=1, method='dot'):
super().__init__()
self.method = method
self.hidden_size = hidden_size
self.batch_size = batch_size
if self.method == 'general':
self.attn = nn.Linear(self.hidden_size, hidden_size, bias=False)
elif self.method == 'concat':
self.attn = nn.Linear(self.hidden_size * 2, hidden_size, bias=False
)
self.v = nn.Parameter(torch.FloatTensor(batch_size, 1, hidden_size)
)
def forward(self, hidden, encoder_outputs):
attn_energies = self.score(hidden, encoder_outputs)
return F.softmax(attn_energies, dim=2)
def score(self, hidden, encoder_output):
if self.method == 'general':
energy = self.attn(encoder_output)
energy = energy.transpose(2, 1)
energy = hidden.bmm(energy)
return energy
elif self.method == 'concat':
hidden = hidden * encoder_output.new_ones(encoder_output.size())
energy = self.attn(torch.cat((hidden, encoder_output), -1))
energy = energy.transpose(2, 1)
energy = self.v.bmm(energy)
return energy
else:
encoder_output = encoder_output.transpose(2, 1)
energy = hidden.bmm(encoder_output)
return energy
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
PositiveLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ty/cty37mdgrnip6wbqgqs3drjcntx7pvywyzaoeutpaqwvbvqydhd4.py
# Topologically Sorted Source Nodes: [transformed_weight], Original ATen: [aten.clamp, aten.ge]
# Source node to ATen node mapping:
# transformed_weight => clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%primals_1, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%primals_1, 0), kwargs = {})
triton_poi_fused_clamp_ge_0 = async_compile.triton('triton_poi_fused_clamp_ge_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_ge_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp0 >= tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [transformed_weight], Original ATen: [aten.clamp, aten.ge]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_ge_0.run(primals_1, buf0, buf2, 16, grid=grid(16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del buf0
del primals_2
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PositiveLinear(nn.Linear):
"""Applies a transformation to the incoming data of the following form: :math:`y_i = xlog(exp(A)+1)^T`
where log and exp are elementwise operations.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape
`(out_features x in_features)`
bias: the learnable bias of the module of shape `(out_features)`
Examples::
        >>> m = PositiveLinear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
"""
def forward(self, input):
transformed_weight = torch.clamp(self.weight, min=0)
torch.clamp(self.bias, min=0)
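        # Note: this clamped bias is discarded; F.linear below receives the original, unclamped bias.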
return F.linear(input, transformed_weight, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clamp_ge_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
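    # Clamp the weights at zero and record the w >= 0 mask so the backward pass can route gradients.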
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp0 >= tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_clamp_ge_0[grid(16)](primals_1, buf0, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del buf0
del primals_2
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class PositiveLinearNew(nn.Linear):
"""Applies a transformation to the incoming data of the following form: :math:`y_i = xlog(exp(A)+1)^T`
where log and exp are elementwise operations.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape
`(out_features x in_features)`
bias: the learnable bias of the module of shape `(out_features)`
Examples::
>>> m = nn.PositiveLinear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
"""
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
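# Hedged parity sketch (helper name is illustrative; assumes a CUDA device,
# since call() launches a Triton kernel and a cuBLAS addmm): the compiled
# wrapper should agree with an eager F.linear on the clamped weight, because
# the bias is applied unclamped in both paths. The input must be (4, 4, 4, 4)
# to satisfy the size asserts inside call().
def _check_positive_linear_parity():
    import torch.nn.functional as F
    m = PositiveLinearNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = F.linear(x, torch.clamp(m.weight, min=0), m.bias)
    assert torch.allclose(m(x), ref, atol=1e-6)
    return ref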
| dfioravanti/copula_vae | PositiveLinear | false | 1,838 | [
"MIT"
] | 0 | 4fdadfb9ca65a75367d50df4a5848942de20741f | https://github.com/dfioravanti/copula_vae/tree/4fdadfb9ca65a75367d50df4a5848942de20741f | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Linear):
"""Applies a transformation to the incoming data of the following form: :math:`y_i = xlog(exp(A)+1)^T`
where log and exp are elementwise operations.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, in\\_features)` where :math:`*` means any number of
additional dimensions
- Output: :math:`(N, *, out\\_features)` where all but the last dimension
are the same shape as the input.
Attributes:
weight: the learnable weights of the module of shape
`(out_features x in_features)`
bias: the learnable bias of the module of shape `(out_features)`
Examples::
>>> m = nn.PositiveLinear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
"""
def forward(self, input):
transformed_weight = torch.clamp(self.weight, min=0)
torch.clamp(self.bias, min=0)
return F.linear(input, transformed_weight, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PrimaryCaps | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/dj/cdjtpyt5zghcvffzxa6wkjmodj3o7syavliioml7niive7obzd4e.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 128], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 81
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (81*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (20736*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ge/cge7bnkkv3lovy5jtwqk5sazg4aouf273qc7u6jafiuctxmosb55.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (1048576*y1)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/r6/cr6ia57wuoco53lexvas2s6mswc63dlghrk4z5v2yjt6qpkp6y5y.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 802816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yv/cyv75ix3abbpjnp4kuxktubs5khwx735tm64lqxzehoe3xz4cgsy.py
# Topologically Sorted Source Nodes: [pow_1, squared_norm], Original ATen: [aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_1 => pow_1
# squared_norm => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %sum_1 : [num_users=4] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
triton_red_fused_pow_sum_3 = async_compile.triton('triton_red_fused_pow_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[8192, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 6272
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 8
x1 = (xindex // 8) % 196
x2 = (xindex // 1568)
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + ((32*x0) + (256*((r3 + (128*x1)) % 784)) + (200704*x2) + ((r3 + (128*x1)) // 784)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4d/c4ddwhtzgzsmxbeu7xngnhm32yxegk443x2wgjyupklf36lrrckw.py
# Topologically Sorted Source Nodes: [pow_1, squared_norm], Original ATen: [aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_1 => pow_1
# squared_norm => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %sum_1 : [num_users=4] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
triton_red_fused_pow_sum_4 = async_compile.triton('triton_red_fused_pow_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[32, 256],
reduction_hint=ReductionHint.OUTER_TINY,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sum_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 32
rnumel = 196
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 8
x1 = (xindex // 8)
_tmp2 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (8*r2) + (1568*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = _tmp2 + tmp1
_tmp2 = tl.where(rmask & xmask, tmp3, _tmp2)
tmp2 = tl.sum(_tmp2, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
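# Hedged note (helper name is illustrative; this block is not produced by
# Inductor): the two reduction kernels above split the squared-norm sum over
# the last dimension of the (4, 8, 25088) view into two stages.
# triton_red_fused_pow_sum_3 writes 196 partial sums of 128 squared elements
# for each of the 4*8 = 32 rows, and triton_red_fused_pow_sum_4 adds the 196
# partials per row. The CPU sketch below reproduces that split.
def _split_reduction_sketch():
    v = torch.rand(4, 8, 25088)
    partial = (v ** 2).reshape(4, 8, 196, 128).sum(-1)  # stage 1: 196 partials per row
    total = partial.sum(-1)                             # stage 2: combine the partials
    assert torch.allclose(total, (v ** 2).sum(-1), rtol=1e-4, atol=1e-3)
    return total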
# kernel path: runs/run_shard_6/inductor_cache/qv/cqvdqjqmaz7ljgkjsc6bws625p5iycl6ax3p5cznxw3ydqrjge63.py
# Topologically Sorted Source Nodes: [mul, add, sqrt, mul_1, output_tensor], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# mul => mul
# mul_1 => mul_1
# output_tensor => div
# sqrt => sqrt
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %view_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.0), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %sqrt), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_div_mul_sqrt_5 = async_compile.triton('triton_poi_fused_add_div_mul_sqrt_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sqrt_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 802816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = (xindex // 25088)
x0 = xindex % 25088
x1 = (xindex // 25088) % 8
x2 = (xindex // 200704)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + ((32*x1) + (256*(x0 % 784)) + (200704*x2) + (x0 // 784)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp0 + tmp3
tmp5 = libdevice.sqrt(tmp0)
tmp6 = tmp4 * tmp5
tmp7 = tmp2 / tmp6
tl.store(out_ptr0 + (x4), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 256, 9, 9), (20736, 81, 9, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 256, 9, 9), (20736, 1, 2304, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 65536, 81, grid=grid(65536, 81), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 1024, 4096, grid=grid(1024, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 28, 28), (200704, 1, 7168, 256))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_2, 802816, grid=grid(802816), stream=stream0)
del primals_2
buf4 = empty_strided_cuda((4, 8, 1, 196), (1568, 1, 6272, 8), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, squared_norm], Original ATen: [aten.pow, aten.sum]
triton_red_fused_pow_sum_3.run(buf3, buf4, 6272, 128, grid=grid(6272), stream=stream0)
buf5 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, squared_norm], Original ATen: [aten.pow, aten.sum]
triton_red_fused_pow_sum_4.run(buf4, buf5, 32, 196, grid=grid(32), stream=stream0)
del buf4
buf6 = empty_strided_cuda((4, 8, 25088), (200704, 25088, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, add, sqrt, mul_1, output_tensor], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.div]
triton_poi_fused_add_div_mul_sqrt_5.run(buf5, buf3, buf6, 802816, grid=grid(802816), stream=stream0)
return (reinterpret_tensor(buf6, (4, 25088, 8), (200704, 1, 25088), 0), buf0, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 256, 9, 9), (20736, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PrimaryCaps(nn.Module):
"""
    Input: (B,C,H,W)=(B,256,20,20)
    Output: (B,C_N,C_L)=(B,32*6*6, 8)=(B,1152,8)
    C_N: capsule_num, the number of capsules
    C_L: capsule_length, the length of each capsule
"""
def __init__(self, capsule_length=8, in_channels=256, out_channels=32,
capsule_num=32 * 6 * 6, kernel_size=9, stride=2, padding=0):
super(PrimaryCaps, self).__init__()
self.capsule_length = capsule_length
self.capsule_num = capsule_num
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels * capsule_length, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
"""
:param x: (B,C,H,W) -> (B,256,20,20)
:return: (B,C_N,C_L) -> (100,32*6*6,8) = (100,1152,8)
"""
x = self.conv(x)
x = self.toCapsules(x)
return x
def toCapsules(self, x):
B = x.size(0)
x.size(1)
H = x.size(2)
W = x.size(3)
x = x.reshape(B, self.capsule_length, -1, H, W)
x = x.reshape(B, self.capsule_length, -1)
x = self.squash(x)
x = x.permute(0, 2, 1)
return x
def squash(self, input_tensor):
"""
input_tensor: (B, 1, 10, 16)
return: output_tensor: (B, 1, 10, 16)
"""
squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
output_tensor = squared_norm * input_tensor / ((1.0 + squared_norm) *
torch.sqrt(squared_norm))
return output_tensor
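# Hedged sketch (helper name is illustrative, not part of the original module):
# traces the shapes for the (B, 256, 64, 64) test input and checks the squash
# property. The 9x9 stride-2 conv gives (B, 256, 28, 28); splitting 256
# channels into 8 capsule dims x 32 maps and flattening the grid gives
# (B, 8, 32*28*28) = (B, 8, 25088); the final permute yields (B, 25088, 8).
# squash rescales each vector along its last dimension to length
# ||v||^2 / (1 + ||v||^2) < 1 without changing its direction (here that last
# dimension is the flattened 25088-element one).
def _example_primary_caps():
    caps = PrimaryCaps()
    out = caps(torch.rand(1, 256, 64, 64))
    assert out.shape == (1, 25088, 8)
    lengths = caps.squash(torch.randn(2, 8, 10)).norm(dim=-1)
    assert (lengths < 1.0).all()  # squashed lengths stay below 1
    return out.shape, lengths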
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 81
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 81 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 20736 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 1048576 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_red_fused_pow_sum_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 6272
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 8
x1 = xindex // 8 % 196
x2 = xindex // 1568
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r3 = rindex
tmp0 = tl.load(in_ptr0 + (32 * x0 + 256 * ((r3 + 128 * x1) % 784) +
200704 * x2 + (r3 + 128 * x1) // 784), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x4, tmp3, xmask)
@triton.jit
def triton_red_fused_pow_sum_4(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 32
rnumel = 196
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 8
x1 = xindex // 8
_tmp2 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * r2 + 1568 * x1), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = _tmp2 + tmp1
_tmp2 = tl.where(rmask & xmask, tmp3, _tmp2)
tmp2 = tl.sum(_tmp2, 1)[:, None]
tl.store(out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sqrt_5(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex // 25088
x0 = xindex % 25088
x1 = xindex // 25088 % 8
x2 = xindex // 200704
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (32 * x1 + 256 * (x0 % 784) + 200704 * x2 + x0 //
784), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp0 + tmp3
tmp5 = libdevice.sqrt(tmp0)
tmp6 = tmp4 * tmp5
tmp7 = tmp2 / tmp6
tl.store(out_ptr0 + x4, tmp7, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (256, 256, 9, 9), (20736, 81, 9, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 256, 9, 9), (20736, 1, 2304, 256),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(65536, 81)](primals_1, buf0, 65536, 81,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 256, 64, 64), (1048576, 1, 16384, 256
), torch.float32)
triton_poi_fused_1[grid(1024, 4096)](primals_3, buf1, 1024, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 256, 28, 28), (200704, 1, 7168, 256))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(802816)](buf3, primals_2,
802816, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf4 = empty_strided_cuda((4, 8, 1, 196), (1568, 1, 6272, 8), torch
.float32)
triton_red_fused_pow_sum_3[grid(6272)](buf3, buf4, 6272, 128,
XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 8, 1), (8, 1, 1), torch.float32)
triton_red_fused_pow_sum_4[grid(32)](buf4, buf5, 32, 196, XBLOCK=2,
RBLOCK=256, num_warps=4, num_stages=1)
del buf4
buf6 = empty_strided_cuda((4, 8, 25088), (200704, 25088, 1), torch.
float32)
triton_poi_fused_add_div_mul_sqrt_5[grid(802816)](buf5, buf3, buf6,
802816, XBLOCK=1024, num_warps=4, num_stages=1)
return reinterpret_tensor(buf6, (4, 25088, 8), (200704, 1, 25088), 0
), buf0, buf1, buf3, buf5
class PrimaryCapsNew(nn.Module):
"""
    Input: (B,C,H,W)=(B,256,20,20)
    Output: (B,C_N,C_L)=(B,32*6*6, 8)=(B,1152,8)
    C_N: capsule_num, the number of capsules
    C_L: capsule_length, the length of each capsule
"""
def __init__(self, capsule_length=8, in_channels=256, out_channels=32,
capsule_num=32 * 6 * 6, kernel_size=9, stride=2, padding=0):
super(PrimaryCapsNew, self).__init__()
self.capsule_length = capsule_length
self.capsule_num = capsule_num
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels * capsule_length, kernel_size=kernel_size, stride=
stride, padding=padding)
def toCapsules(self, x):
B = x.size(0)
x.size(1)
H = x.size(2)
W = x.size(3)
x = x.reshape(B, self.capsule_length, -1, H, W)
x = x.reshape(B, self.capsule_length, -1)
x = self.squash(x)
x = x.permute(0, 2, 1)
return x
def squash(self, input_tensor):
"""
input_tensor: (B, 1, 10, 16)
return: output_tensor: (B, 1, 10, 16)
"""
squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
output_tensor = squared_norm * input_tensor / ((1.0 + squared_norm) *
torch.sqrt(squared_norm))
return output_tensor
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| daxiongpro/pytorch-tutorial | PrimaryCaps | false | 1,839 | [
"MIT"
] | 0 | abafc32f7ee1092024085f703e4ced51ce358a1b | https://github.com/daxiongpro/pytorch-tutorial/tree/abafc32f7ee1092024085f703e4ced51ce358a1b | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Input: (B,C,H,W)=(B,256,20,20)
    Output: (B,C_N,C_L)=(B,32*6*6, 8)=(B,1152,8)
    C_N: capsule_num, the number of capsules
    C_L: capsule_length, the length of each capsule
"""
def __init__(self, capsule_length=8, in_channels=256, out_channels=32,
capsule_num=32 * 6 * 6, kernel_size=9, stride=2, padding=0):
super().__init__()
self.capsule_length = capsule_length
self.capsule_num = capsule_num
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=
out_channels * capsule_length, kernel_size=kernel_size, stride=
stride, padding=padding)
def forward(self, x):
"""
:param x: (B,C,H,W) -> (B,256,20,20)
:return: (B,C_N,C_L) -> (100,32*6*6,8) = (100,1152,8)
"""
x = self.conv(x)
x = self.toCapsules(x)
return x
def toCapsules(self, x):
B = x.size(0)
x.size(1)
H = x.size(2)
W = x.size(3)
x = x.reshape(B, self.capsule_length, -1, H, W)
x = x.reshape(B, self.capsule_length, -1)
x = self.squash(x)
x = x.permute(0, 2, 1)
return x
def squash(self, input_tensor):
"""
input_tensor: (B, 1, 10, 16)
return: output_tensor: (B, 1, 10, 16)
"""
squared_norm = (input_tensor ** 2).sum(-1, keepdim=True)
output_tensor = squared_norm * input_tensor / ((1.0 + squared_norm) *
torch.sqrt(squared_norm))
return output_tensor
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return []
|
LearnMaskedDefault | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/kq/ckqpgjkefsaygrl3b3ktu2kqjnxztj4afoz6gn2iexdub7n3vrx2.py
# Topologically Sorted Source Nodes: [mask, float_1, mul, sub, mul_1, x], Original ATen: [aten.any, aten._to_copy, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# float_1 => convert_element_type
# mask => any_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# x => add
# Graph fragment:
# %any_1 : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%view, -1), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%unsqueeze_2, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %convert_element_type), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_per_fused__to_copy_add_any_mul_rsub_0 = async_compile.triton('triton_per_fused__to_copy_add_any_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_any_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_any_mul_rsub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r2 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp9 = tl.load(in_ptr2 + (r2), None, eviction_policy='evict_last')
tmp1 = (tmp0 != 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = triton_helpers.any(tmp4, 1)[:, None]
tmp7 = tmp5.to(tl.float32)
tmp8 = tmp6 * tmp7
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr1 + (r1 + (64*x0)), tmp13, xmask)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mask, float_1, mul, sub, mul_1, x], Original ATen: [aten.any, aten._to_copy, aten.mul, aten.rsub, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_any_mul_rsub_0.run(primals_1, primals_2, primals_3, buf0, buf1, 4, 64, grid=grid(4), stream=stream0)
del primals_1
del primals_2
del primals_3
return (buf1, reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.nn
class LearnMaskedDefault(nn.Module):
"""
Learns default values to fill invalid entries within input tensors. The
invalid entries are represented by a mask which is passed into forward alongside
the input tensor. Note the default value is only used if all entries in the batch row are
invalid rather than just a portion of invalid entries within each batch row.
"""
def __init__(self, feature_dim: 'int', init_method: 'str'='gaussian',
freeze: 'bool'=False):
"""
Args:
feature_dim (int): the size of the default value parameter, this must match the
input tensor size.
init_method (str): the initial default value parameter. Options:
                'gaussian'
'zeros'
freeze (bool): If True, the learned default parameter weights are frozen.
"""
super().__init__()
if init_method == 'zeros':
self._learned_defaults = nn.Parameter(torch.zeros(feature_dim),
requires_grad=not freeze)
elif init_method == 'gaussian':
self._learned_defaults = nn.Parameter(torch.Tensor(feature_dim),
requires_grad=not freeze)
nn.init.normal_(self._learned_defaults)
else:
raise NotImplementedError(
f"{init_method} not available. Options are: 'zeros' or 'gaussian'"
)
def forward(self, x: 'torch.Tensor', mask: 'torch.Tensor') ->torch.Tensor:
"""
Args:
x (torch.Tensor): tensor of shape (batch_size, feature_dim).
mask (torch.Tensor): bool tensor of shape (batch_size, seq_len) If all elements
in the batch dimension are False the learned default parameter is used for
that batch element.
Returns:
Tensor with shape (batch_size, feature_dim)
"""
mask = mask.view(mask.shape[0], -1).any(dim=-1)
for i in range(1, x.dim()):
mask = mask.unsqueeze(i)
x = x * mask.float() + self._learned_defaults * (1 - mask.float())
return x
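# Hedged usage sketch (helper name is illustrative, not part of the original
# module): a batch row whose mask is entirely False falls back to the learned
# default vector, while a row with at least one True entry keeps its features.
def _example_learn_masked_default():
    m = LearnMaskedDefault(feature_dim=4, init_method='zeros')
    x = torch.ones(2, 4)
    mask = torch.tensor([[True, False], [False, False]])
    out = m(x, mask)
    assert torch.allclose(out[0], x[0])                 # row 0 has a valid entry
    assert torch.allclose(out[1], m._learned_defaults)  # row 1 is fully masked
    return out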
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'feature_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__to_copy_add_any_mul_rsub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r2 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp9 = tl.load(in_ptr2 + r2, None, eviction_policy='evict_last')
tmp1 = tmp0 != 0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = triton_helpers.any(tmp4, 1)[:, None]
tmp7 = tmp5.to(tl.float32)
tmp8 = tmp6 * tmp7
tmp10 = 1.0
tmp11 = tmp10 - tmp7
tmp12 = tmp9 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr1 + (r1 + 64 * x0), tmp13, xmask)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_any_mul_rsub_0[grid(4)](primals_1,
primals_2, primals_3, buf0, buf1, 4, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del primals_1
del primals_2
del primals_3
return buf1, reinterpret_tensor(buf0, (4, 1, 1, 1), (1, 1, 1, 1), 0)
class LearnMaskedDefaultNew(nn.Module):
"""
Learns default values to fill invalid entries within input tensors. The
invalid entries are represented by a mask which is passed into forward alongside
the input tensor. Note the default value is only used if all entries in the batch row are
invalid rather than just a portion of invalid entries within each batch row.
"""
def __init__(self, feature_dim: 'int', init_method: 'str'='gaussian',
freeze: 'bool'=False):
"""
Args:
feature_dim (int): the size of the default value parameter, this must match the
input tensor size.
init_method (str): the initial default value parameter. Options:
                'gaussian'
'zeros'
freeze (bool): If True, the learned default parameter weights are frozen.
"""
super().__init__()
if init_method == 'zeros':
self._learned_defaults = nn.Parameter(torch.zeros(feature_dim),
requires_grad=not freeze)
elif init_method == 'gaussian':
self._learned_defaults = nn.Parameter(torch.Tensor(feature_dim),
requires_grad=not freeze)
nn.init.normal_(self._learned_defaults)
else:
raise NotImplementedError(
f"{init_method} not available. Options are: 'zeros' or 'gaussian'"
)
def forward(self, input_0, input_1):
primals_3 = self._learned_defaults
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| denred0/pytorchvideo | LearnMaskedDefault | false | 1,840 | [
"Apache-2.0"
] | 0 | d874bfc9969895d2afcedea2e12bae5e1bcfb809 | https://github.com/denred0/pytorchvideo/tree/d874bfc9969895d2afcedea2e12bae5e1bcfb809 | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn
class Model(nn.Module):
"""
Learns default values to fill invalid entries within input tensors. The
invalid entries are represented by a mask which is passed into forward alongside
the input tensor. Note the default value is only used if all entries in the batch row are
invalid rather than just a portion of invalid entries within each batch row.
"""
def __init__(self, feature_dim: 'int', init_method: 'str'='gaussian',
freeze: 'bool'=False):
"""
Args:
feature_dim (int): the size of the default value parameter, this must match the
input tensor size.
init_method (str): the initial default value parameter. Options:
                'gaussian'
'zeros'
freeze (bool): If True, the learned default parameter weights are frozen.
"""
super().__init__()
if init_method == 'zeros':
self._learned_defaults = nn.Parameter(torch.zeros(feature_dim),
requires_grad=not freeze)
elif init_method == 'gaussian':
self._learned_defaults = nn.Parameter(torch.Tensor(feature_dim),
requires_grad=not freeze)
nn.init.normal_(self._learned_defaults)
else:
raise NotImplementedError(
f"{init_method} not available. Options are: 'zeros' or 'gaussian'"
)
def forward(self, x: 'torch.Tensor', mask: 'torch.Tensor') ->torch.Tensor:
"""
Args:
x (torch.Tensor): tensor of shape (batch_size, feature_dim).
mask (torch.Tensor): bool tensor of shape (batch_size, seq_len) If all elements
in the batch dimension are False the learned default parameter is used for
that batch element.
Returns:
Tensor with shape (batch_size, feature_dim)
"""
mask = mask.view(mask.shape[0], -1).any(dim=-1)
for i in range(1, x.dim()):
mask = mask.unsqueeze(i)
x = x * mask.float() + self._learned_defaults * (1 - mask.float())
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SynthWide | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3r/c3rslv2exiiurkywxt4oxifqli4zlmoxh26mfj7b2xmxbacdj7n6.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/wv/cwvtp6qflpb42kxrujmda5zselv7wvkz3fgp2tryo2ftsisaildr.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (288*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/nw/cnwm6ljuusoqjcwr2jdx6p2ue7ldghxjdr3oe62stiuqhsboiczy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ih/cihu7ohoiwwrblocurozhw6ihpzbq4oc43mseo4n6wd7ronp74tw.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
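# Note on the layout kernels (triton_poi_fused_0 through triton_poi_fused_4): they only repack
# memory layout. Judging by the output strides built in call() below (e.g. (12288, 1, 192, 3) for
# the input and (288, 1, 96, 32) for the 64x32x3x3 weight), they move the NCHW input and weights
# into channels-last storage before the extern convolution calls. A rough eager-mode sketch of the
# same repacking for a generic input x and weight w (an approximation, not the generated code):
#   x_cl = x.contiguous(memory_format=torch.channels_last)
#   w_cl = w.contiguous(memory_format=torch.channels_last)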
# kernel path: runs/run_shard_6/inductor_cache/hw/chwsynwj6wtnil363fykcg6p5ozgedz7shusvmf2pjbiyde64f5p.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
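# The fused kernel above adds the per-channel bias (x0 = xindex % 32 selects the channel in the
# channels-last buffer) and applies ReLU in place after the extern convolution. A rough eager-mode
# equivalent of the whole conv2d + relu step it implements, using torch.nn.functional as F
# (a sketch with the stride/padding taken from the graph fragment above):
#   y = F.relu(F.conv2d(x, conv1_weight, conv1_bias, stride=1, padding=1))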
# kernel path: runs/run_shard_6/inductor_cache/mb/cmbayu3hdrubakb2fbiffe5lr36qd34va3du4yogkj7lkrixlhv6.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32) % 32
x2 = (xindex // 1024)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (4096*x2)), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + (64*x1) + (4096*x2)), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + (64*x1) + (4096*x2)), None)
tmp5 = tl.load(in_ptr0 + (2080 + x0 + (64*x1) + (4096*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
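# The pooling kernel above is a 2x2, stride-2 max pool over the channels-last buffer: the four
# loads are the window elements, tmp6 is their maximum, and tmp16 is an int8 in {0, 1, 2, 3}
# recording which window position held the maximum (ties keep the earlier position). A rough
# eager analogue (a sketch; note that F.max_pool2d returns flat indices, not these 0-3 offsets):
#   pooled, indices = F.max_pool2d(x, kernel_size=2, stride=2, return_indices=True)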
# kernel path: runs/run_shard_6/inductor_cache/ya/cya72dxxug7bvahrgkiz2tev6wxbq4aissg3wd3pl37yen37nb4b.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xj/cxjptd7j2qxrb3kjd7zlgxmewdvhhkbw3tukgvay2kmhnxcajkzw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64) % 16
x2 = (xindex // 1024)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (4096*x2)), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (4096*x2)), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + (128*x1) + (4096*x2)), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + (128*x1) + (4096*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/v5/cv5bres457ho44iaqr63mi3bbgzezc3pxml5sotwsljao2g5whrl.py
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# relu_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/sr/csr7z2afgh7gbn3y7lq2wp2sva4b7imt3iniu36uxe33zsilt4x7.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128) % 8
x2 = (xindex // 1024)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (4096*x2)), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (4096*x2)), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + (256*x1) + (4096*x2)), None)
tmp5 = tl.load(in_ptr0 + (2176 + x0 + (256*x1) + (4096*x2)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/lx/clxvms4hq26tsvprssavxtjdwd56o5l3alm52doxgzcn7cyo335z.py
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# relu_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4c/c4cgfrrnnjaqqu2mw37mbl7gs2lmyooybwlmphux5q3nafqldr5r.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y5 = yindex
y4 = (yindex // 16)
y6 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2304 + x2 + (512*y0) + (4096*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (256*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (16*x2) + (4096*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
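# This last pooling kernel differs from the earlier ones: besides the 2x2/stride-2 max and the
# int8 offsets, it writes the pooled values into a contiguous NCHW buffer (buf20 with strides
# (4096, 16, 4, 1) in call() below), so the result can be reinterpreted as a (4, 4096) matrix for
# the fc1 addmm. Roughly, in eager terms (a sketch):
#   flat = pooled.contiguous().view(4, 256 * 4 * 4)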
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (10, 4096), (4096, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 96, 9, grid=grid(96, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 9, grid=grid(2048, 9), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 9, grid=grid(32768, 9), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.float32)
buf8 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_6.run(buf6, buf7, buf8, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_5, 262144, grid=grid(262144), stream=stream0)
del primals_5
buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.float32)
buf12 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf10, buf11, buf12, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, relu_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf14, primals_7, 131072, grid=grid(131072), stream=stream0)
del primals_7
buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.float32)
buf16 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_10.run(buf14, buf15, buf16, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, relu_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf18, primals_9, 65536, grid=grid(65536), stream=stream0)
del primals_9
buf19 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.int8)
buf20 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_12.run(buf18, buf19, buf20, 64, 256, grid=grid(64, 256), stream=stream0)
buf21 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf20, (4, 4096), (4096, 1), 0), reinterpret_tensor(primals_10, (4096, 10), (1, 4096), 0), alpha=1, beta=1, out=buf21)
del primals_11
return (buf21, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10, buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor(buf20, (4, 4096), (4096, 1), 0), primals_10, )
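# call() returns the (4, 10) logits as its first element, followed by the intermediate buffers
# listed above; SynthWideNew.forward further below unwraps output[0]. A rough usage sketch
# (assuming CUDA tensors with the shapes asserted above, the default num_c=10 and f=1, and a
# batch of exactly four 64x64 images; the argument order mirrors SynthWideNew.forward):
#   logits = call([conv1.weight, conv1.bias, images, conv2.weight, conv2.bias,
#                  conv3.weight, conv3.bias, conv4.weight, conv4.bias,
#                  fc1.weight, fc1.bias])[0]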
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 4096), (4096, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SynthWide(nn.Module):
def __init__(self, num_c=10, f=1):
super(SynthWide, self).__init__()
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32 * f, 3, padding=1)
self.conv2 = nn.Conv2d(32 * f, 64 * f, 3, padding=1)
self.conv3 = nn.Conv2d(64 * f, 128 * f, 3, padding=1)
self.conv4 = nn.Conv2d(128 * f, 256, 3, padding=1)
self.fc1 = nn.Linear(256 * 4 * 4, num_c)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
x = x.view(-1, 256 * 4 * 4)
x = self.fc1(x)
return x
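# Shape bookkeeping for fc1 (assuming the 4x3x64x64 inputs from get_inputs below): each of the
# four 2x2 max-pools halves the spatial size, 64 -> 32 -> 16 -> 8 -> 4, and conv4 emits 256
# channels, so the flattened features have 256 * 4 * 4 = 4096 elements per sample.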
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 32 * x2 + 288 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32 % 32
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (32 + x0 + 64 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 64 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2080 + x0 + 64 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64 % 16
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 128 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2112 + x0 + 128 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128 % 8
x2 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 4096 * x2), None)
tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 4096 * x2), None)
tmp3 = tl.load(in_ptr0 + (2048 + x0 + 256 * x1 + 4096 * x2), None)
tmp5 = tl.load(in_ptr0 + (2176 + x0 + 256 * x1 + 4096 * x2), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y5 = yindex
y4 = yindex // 16
y6 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 512 * y0 + 4096 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (256 + x2 + 512 * y0 + 4096 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2048 + x2 + 512 * y0 + 4096 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2304 + x2 + 512 * y0 + 4096 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 256 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 16 * x2 + 4096 * y4), tmp16, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (10, 4096), (4096, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 3, 3, 3), (27, 1, 9, 3), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(96, 9)](primals_1, buf0, 96, 9, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 3, 3), (288, 1, 96, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 9)](primals_4, buf2, 2048, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch
.float32)
triton_poi_fused_3[grid(8192, 9)](primals_6, buf3, 8192, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 9)](primals_8, buf4, 32768, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 64, 64), (131072, 1, 2048, 32))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(524288)](buf6, primals_2,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf7 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.float32)
buf8 = empty_strided_cuda((4, 32, 32, 32), (32768, 1, 1024, 32),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_6[grid(131072)](buf6, buf7,
buf8, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf9 = extern_kernels.convolution(buf7, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 64, 32, 32), (65536, 1, 2048, 64))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(262144)](buf10, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.float32)
buf12 = empty_strided_cuda((4, 64, 16, 16), (16384, 1, 1024, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(65536)](buf10,
buf11, buf12, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf13 = extern_kernels.convolution(buf11, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 16, 16), (32768, 1, 2048, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_9[grid(131072)](buf14, primals_7,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.float32)
buf16 = empty_strided_cuda((4, 128, 8, 8), (8192, 1, 1024, 128),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_10[grid(32768)](buf14,
buf15, buf16, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf15, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 256, 8, 8), (16384, 1, 2048, 256))
buf18 = buf17
del buf17
triton_poi_fused_convolution_relu_11[grid(65536)](buf18, primals_9,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
buf19 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.int8)
buf20 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_12[grid(64, 256)](buf18,
buf19, buf20, 64, 256, XBLOCK=256, YBLOCK=1, num_warps=4,
num_stages=1)
buf21 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf20, (4, 4096
), (4096, 1), 0), reinterpret_tensor(primals_10, (4096, 10), (1,
4096), 0), alpha=1, beta=1, out=buf21)
del primals_11
return (buf21, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf10,
buf11, buf12, buf14, buf15, buf16, buf18, buf19, reinterpret_tensor
(buf20, (4, 4096), (4096, 1), 0), primals_10)
class SynthWideNew(nn.Module):
def __init__(self, num_c=10, f=1):
super(SynthWideNew, self).__init__()
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32 * f, 3, padding=1)
self.conv2 = nn.Conv2d(32 * f, 64 * f, 3, padding=1)
self.conv3 = nn.Conv2d(64 * f, 128 * f, 3, padding=1)
self.conv4 = nn.Conv2d(128 * f, 256, 3, padding=1)
self.fc1 = nn.Linear(256 * 4 * 4, num_c)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc1.weight
primals_11 = self.fc1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| dengliming/iotnets | SynthWide | false | 1,842 | ["MIT"] | 0 | db744e56769c799dbf765a27fc5aa91e3edeaaa3 | https://github.com/dengliming/iotnets/tree/db744e56769c799dbf765a27fc5aa91e3edeaaa3 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_c=10, f=1):
super().__init__()
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32 * f, 3, padding=1)
self.conv2 = nn.Conv2d(32 * f, 64 * f, 3, padding=1)
self.conv3 = nn.Conv2d(64 * f, 128 * f, 3, padding=1)
self.conv4 = nn.Conv2d(128 * f, 256, 3, padding=1)
self.fc1 = nn.Linear(256 * 4 * 4, num_c)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
x = x.view(-1, 256 * 4 * 4)
x = self.fc1(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
TCN_SLID | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/vw/cvwzmrf7u7j4rr5x5bjsxorzvl7rkn2io72j5uk3mjnapf4ukldf.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul, pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1, 2], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
triton_per_fused__weight_norm_interface_0 = async_compile.triton('triton_per_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__weight_norm_interface_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp9, xmask)
''', device_str='cuda')
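# NOTE (editorial, hedged): the persistent-reduction kernel above fuses
# aten._weight_norm_interface for one weight-normalised Conv1d: a per-output-channel
# L2 norm of weight_v over dims (1, 2), then w = weight_v * weight_g / norm. A minimal
# eager-mode sketch of the same computation (the helper name is illustrative only and
# not part of the generated module):
def _weight_norm_reference(weight_v, weight_g):
    # weight_v: (out_channels, in_channels, kernel), weight_g: (out_channels, 1, 1)
    norm = weight_v.pow(2).sum(dim=(1, 2), keepdim=True).sqrt()
    return norm, weight_v * (weight_g / norm)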
# kernel path: runs/run_shard_6/inductor_cache/5i/c5iuv6v7eggudqpwvokzc6nbwpfull4j36zkwmtjfwwtnhluvvhi.py
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %mul, %primals_4, [1], [3], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
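# NOTE (editorial, hedged): this copy kernel appears to materialise the x.transpose(1, 2)
# from TCN_SLID.forward, turning the (batch, time, features) input into a contiguous
# (batch, channels, time) buffer that the external Conv1d kernel can consume; roughly
# `x_cf = x.transpose(1, 2).contiguous()` in eager PyTorch.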
# kernel path: runs/run_shard_6/inductor_cache/s6/cs6esq4gxutdvfqbp5wxymmqmea4w5gmnweryjopnykrdwm5amk2.py
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.clone, aten.leaky_relu]
# Source node to ATen node mapping:
# input_2 => clone
# input_3 => gt, mul_1, where
# Graph fragment:
# %clone : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_3,), kwargs = {memory_format: torch.contiguous_format})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %clone, %mul_1), kwargs = {})
triton_poi_fused_clone_leaky_relu_2 = async_compile.triton('triton_poi_fused_clone_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (7*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
''', device_str='cuda')
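# NOTE (editorial, hedged): this kernel fuses the three eager ops that follow conv1 in the
# first TemporalBlock: the bias add (the convolution above was launched with bias=None),
# the causal Chomp1d slice (keep the first 4 of the 7 output positions, i.e. drop the
# trailing `padding` samples) and LeakyReLU(0.01). The boolean output is the >0 mask kept
# for the backward pass. Illustrative eager sketch (helper name is not from the source):
def _chomp_bias_leaky_relu_reference(conv_out, bias, chomp, slope=0.01):
    y = conv_out[:, :, :-chomp] + bias.view(1, -1, 1)
    return y > 0, torch.nn.functional.leaky_relu(y, negative_slope=slope)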
# kernel path: runs/run_shard_6/inductor_cache/6w/c6wlczr472hjjxcqvwmsumjysp3ezdoa3khpkk64bjpckccevqn2.py
# Topologically Sorted Source Nodes: [input_6, input_7, add, input_9], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
# Source node to ATen node mapping:
# add => add
# input_6 => clone_2
# input_7 => gt_1, mul_3, where_1
# input_9 => gt_2, mul_4, where_2
# Graph fragment:
# %clone_2 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_6,), kwargs = {memory_format: torch.contiguous_format})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone_2, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_2, 0.01), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %clone_2, %mul_3), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %permute), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.01), kwargs = {})
# %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add, %mul_4), kwargs = {})
triton_poi_fused_add_clone_leaky_relu_3 = async_compile.triton('triton_poi_fused_add_clone_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (7*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp10, xmask & ymask)
tl.store(out_ptr2 + (x2 + (4*y3)), tmp12, xmask & ymask)
''', device_str='cuda')
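# NOTE (editorial, hedged): tail of the first TemporalBlock. This fuses the bias add and
# Chomp1d after conv2, its LeakyReLU, the residual `out + res` (the residual appears to be
# read straight from the untransposed input buffer) and the block-level LeakyReLU; the two
# boolean outputs are the >0 masks saved for backward, and dropout is a no-op in this
# traced graph. Illustrative eager sketch (helper name is not from the source):
def _block_tail_reference(conv_out, bias, residual, chomp, slope=0.01):
    y = conv_out[:, :, :-chomp] + bias.view(1, -1, 1)
    y = torch.nn.functional.leaky_relu(y, negative_slope=slope)
    return torch.nn.functional.leaky_relu(y + residual, negative_slope=slope)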
# kernel path: runs/run_shard_6/inductor_cache/tp/ctpv3tuhzvgnz3fs3yalkieisgps2k7khhxg5wwtzynv4t2lclo3.py
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.clone, aten.leaky_relu]
# Source node to ATen node mapping:
# input_11 => clone_4
# input_12 => gt_3, mul_6, where_3
# Graph fragment:
# %clone_4 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_9,), kwargs = {memory_format: torch.contiguous_format})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone_4, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_4, 0.01), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %clone_4, %mul_6), kwargs = {})
triton_poi_fused_clone_leaky_relu_4 = async_compile.triton('triton_poi_fused_clone_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = (xindex // 4)
x1 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (10*x3)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
''', device_str='cuda')
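# NOTE (editorial, hedged): same fusion pattern as triton_poi_fused_clone_leaky_relu_2, but
# for the second TemporalBlock (kernel_size 4, dilation 2, padding 6): the convolution
# yields 10 time steps and the Chomp1d slice keeps the first 4, i.e. chomp=6 in the
# _chomp_bias_leaky_relu_reference sketch above.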
# kernel path: runs/run_shard_6/inductor_cache/fm/cfm4u32cggrpem5h3pn67wgrrvk2uivsamfskoaau3hr4tcydxf3.py
# Topologically Sorted Source Nodes: [input_15, input_16, add_1, input_18, output], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# input_15 => clone_6
# input_16 => gt_4, mul_8, where_4
# input_18 => gt_5
# output => clone_8
# Graph fragment:
# %clone_6 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%slice_12,), kwargs = {memory_format: torch.contiguous_format})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%clone_6, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clone_6, 0.01), kwargs = {})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %clone_6, %mul_8), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %where_2), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_1, 0), kwargs = {})
# %clone_8 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_add_clone_leaky_relu_5 = async_compile.triton('triton_poi_fused_add_clone_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clone_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (10*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp10, xmask & ymask)
tl.store(out_ptr2 + (y0 + (4*x2) + (16*y1)), tmp12, xmask & ymask)
''', device_str='cuda')
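# NOTE (editorial, hedged): tail of the second TemporalBlock, mirroring
# triton_poi_fused_add_clone_leaky_relu_3 with chomp=6, except that the final activation is
# stored back in (batch, time, channels) order. That store appears to fold the
# `output.transpose(1, 2)` from TCN_SLID.forward into this kernel, so the buffer can be
# reinterpreted as a (16, 4) matrix for the following linear layers.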
# kernel path: runs/run_shard_6/inductor_cache/bo/cboqxids2gle6skb62l6liqzdlripa535afaq5m42o7yizs2pqca.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.add]
# Source node to ATen node mapping:
# output => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_15), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/5r/c5r72jrislian373vvdicl3vxivbxnru6mpq5yitki3iwjxfb6pw.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# output_3 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_7 = async_compile.triton('triton_poi_fused__softmax_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/ss/cssikys23rjgyx6zuolkgxgpp37foi237kzuvcgpbva6fztyiuc6.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# output_3 => div_4, sum_5
# Graph fragment:
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_5), kwargs = {})
triton_poi_fused__softmax_8 = async_compile.triton('triton_poi_fused__softmax_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
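# NOTE (editorial, hedged): the two kernels above implement a two-pass, numerically stable
# softmax over the last dimension of the logits produced by linear3 — the nn.Softmax(dim=2)
# at the end of TCN_SLID. A compact eager sketch (the helper name is illustrative only):
def _softmax_reference(logits):
    shifted = logits - logits.amax(dim=-1, keepdim=True)  # kernel 7: subtract the row max
    expd = shifted.exp()                                   # kernel 7: exponentiate
    return expd / expd.sum(dim=-1, keepdim=True)           # kernel 8: normalise each row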
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (2, 4), (4, 1))
assert_size_stride(primals_15, (2, ), (1, ))
assert_size_stride(primals_16, (1, 2), (2, 1))
assert_size_stride(primals_17, (1, ), (1, ))
assert_size_stride(primals_18, (4, 1), (1, 1))
assert_size_stride(primals_19, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_per_fused__weight_norm_interface_0.run(buf1, primals_3, primals_2, buf2, 4, 16, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(primals_1, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf2, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 7), (28, 7, 1))
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.clone, aten.leaky_relu]
triton_poi_fused_clone_leaky_relu_2.run(buf4, primals_4, buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf4
del primals_4
buf7 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf8 = reinterpret_tensor(buf7, (4, 1, 1), (1, 1, 1), 0); del buf7 # reuse
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_1], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_0.run(buf8, primals_6, primals_5, buf9, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf6, buf9, stride=(1,), padding=(3,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 7), (28, 7, 1))
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_6, input_7, add, input_9], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
triton_poi_fused_add_clone_leaky_relu_3.run(buf10, primals_7, primals_1, buf11, buf12, buf13, 16, 4, grid=grid(16, 4), stream=stream0)
del buf10
del primals_7
buf14 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 1, 1), (1, 1, 1), 0); del buf14 # reuse
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_2], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_0.run(buf15, primals_9, primals_8, buf16, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf13, buf16, stride=(1,), padding=(6,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 10), (40, 10, 1))
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.clone, aten.leaky_relu]
triton_poi_fused_clone_leaky_relu_4.run(buf17, primals_10, buf18, buf19, 64, grid=grid(64), stream=stream0)
del buf17
del primals_10
buf20 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1, 1), (1, 1, 1), 0); del buf20 # reuse
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm_3], Original ATen: [aten._weight_norm_interface]
triton_per_fused__weight_norm_interface_0.run(buf21, primals_12, primals_11, buf22, 4, 16, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [input_14], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf19, buf22, stride=(1,), padding=(6,), dilation=(2,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf23, (4, 4, 10), (40, 10, 1))
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [input_15, input_16, add_1, input_18, output], Original ATen: [aten.clone, aten.leaky_relu, aten.add]
triton_poi_fused_add_clone_leaky_relu_5.run(buf23, primals_13, buf13, buf24, buf25, buf26, 16, 4, grid=grid(16, 4), stream=stream0)
del buf23
del primals_13
buf27 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(primals_14, (4, 2), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 2), (8, 2, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf28, primals_15, 32, grid=grid(32), stream=stream0)
del primals_15
buf30 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_17, reinterpret_tensor(buf28, (16, 2), (2, 1), 0), reinterpret_tensor(primals_16, (2, 1), (1, 2), 0), alpha=1, beta=1, out=buf30)
del primals_17
buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_19, buf30, reinterpret_tensor(primals_18, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf31)
del primals_19
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_7.run(buf31, buf32, 64, grid=grid(64), stream=stream0)
buf33 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0); del buf31 # reuse
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_8.run(buf32, buf33, 64, grid=grid(64), stream=stream0)
del buf32
return (reinterpret_tensor(buf33, (4, 4, 4), (16, 1, 4), 0), buf2, buf9, buf16, buf22, primals_2, primals_3, primals_5, primals_6, primals_8, primals_9, primals_11, primals_12, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf1, buf2, buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18, buf19, buf21, buf22, buf24, buf25, reinterpret_tensor(buf26, (16, 4), (4, 1), 0), reinterpret_tensor(buf28, (16, 2), (2, 1), 0), buf30, buf33, primals_18, primals_16, primals_14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x, pad_right=True):
return x[:, :, :-self.chomp_size].contiguous() if pad_right else x[
:, :, self.chomp_size:]
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
padding, dropout):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
kernel_size, stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.
dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size, stride, dropout):
super(TemporalConvNet, self).__init__()
self.network = nn.Sequential()
for i, nch in enumerate(num_channels):
dilation = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
padding = (kernel_size - 1) * dilation
self.network.add_module('tblock' + str(i), TemporalBlock(
n_inputs=in_channels, n_outputs=out_channels, kernel_size=
kernel_size, stride=stride, dilation=dilation, padding=
padding, dropout=dropout))
def forward(self, x):
return self.network(x)
class TCN_SLID(nn.Module):
def __init__(self, size_in, size_out, list_conv_depths, size_kernel,
stride, dropout):
super(TCN_SLID, self).__init__()
self.tcn = TemporalConvNet(num_inputs=size_in, num_channels=
list_conv_depths, kernel_size=size_kernel, stride=stride,
dropout=dropout)
self.linear1 = nn.Linear(list_conv_depths[-1], list_conv_depths[-1] //
2)
self.linear2 = nn.Linear(list_conv_depths[-1] // 2,
list_conv_depths[-1] // 4)
self.linear3 = nn.Linear(list_conv_depths[-1] // 4, size_out)
self.softmax = nn.Softmax(dim=2)
def forward(self, x):
output = self.tcn(x.transpose(1, 2))
output = self.linear1(output.transpose(1, 2))
output = self.linear2(output)
output = self.linear3(output)
output = self.softmax(output)
return output.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'size_in': 4, 'size_out': 4, 'list_conv_depths': [4, 4],
'size_kernel': 4, 'stride': 1, 'dropout': 0.5}]
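# NOTE (editorial, hedged): minimal usage sketch tying get_init_inputs() and get_inputs()
# together; the helper name and the shape comments are illustrative, not from the source.
def _example_forward():
    model = TCN_SLID(size_in=4, size_out=4, list_conv_depths=[4, 4],
                     size_kernel=4, stride=1, dropout=0.5)
    x = torch.rand(4, 4, 4)   # (batch, time, features)
    out = model(x)            # softmax over classes, returned as (batch, size_out, time)
    return out.shape          # torch.Size([4, 4, 4])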
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
from torch.nn.utils import weight_norm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__weight_norm_interface_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp7 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = libdevice.sqrt(tmp5)
tmp8 = tmp7 / tmp6
tmp9 = tmp0 * tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 7 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 7 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp10, xmask & ymask)
tl.store(out_ptr2 + (x2 + 4 * y3), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex // 4
x1 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 10 * x3), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
@triton.jit
def triton_poi_fused_add_clone_leaky_relu_5(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, out_ptr2, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 10 * y3), xmask & ymask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp9 > tmp3
tmp11 = tmp9 * tmp5
tmp12 = tl.where(tmp10, tmp9, tmp11)
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp10, xmask & ymask)
tl.store(out_ptr2 + (y0 + 4 * x2 + 16 * y1), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
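# NOTE (editorial, hedged): call() below is the flattened forward pass of TCN_SLID for the
# fixed 4x4x4 shapes asserted at its top. primals_1..primals_13 carry the weight-norm
# parameters (weight_g / weight_v pairs) and biases of the four Conv1d layers in the two
# TemporalBlocks together with the (4, 4, 4) activation input, primals_14..primals_19 are
# the weights and biases of the three Linear layers, and the returned tuple starts with the
# softmax output followed by intermediate buffers kept for the backward pass.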
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (2, 4), (4, 1))
assert_size_stride(primals_15, (2,), (1,))
assert_size_stride(primals_16, (1, 2), (2, 1))
assert_size_stride(primals_17, (1,), (1,))
assert_size_stride(primals_18, (4, 1), (1, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 1, 1), (1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__weight_norm_interface_0[grid(4)](buf1, primals_3,
primals_2, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_convolution_1[grid(16, 4)](primals_1, buf3, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf4 = extern_kernels.convolution(buf3, buf2, stride=(1,), padding=
(3,), dilation=(1,), transposed=False, output_padding=(0,),
groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 7), (28, 7, 1))
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf6 = buf3
del buf3
triton_poi_fused_clone_leaky_relu_2[grid(64)](buf4, primals_4, buf5,
buf6, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf4
del primals_4
buf7 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf8 = reinterpret_tensor(buf7, (4, 1, 1), (1, 1, 1), 0)
del buf7
buf9 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_0[grid(4)](buf8, primals_6,
primals_5, buf9, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf10 = extern_kernels.convolution(buf6, buf9, stride=(1,), padding
=(3,), dilation=(1,), transposed=False, output_padding=(0,),
groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 7), (28, 7, 1))
buf11 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf13 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clone_leaky_relu_3[grid(16, 4)](buf10,
primals_7, primals_1, buf11, buf12, buf13, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf10
del primals_7
buf14 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 1, 1), (1, 1, 1), 0)
del buf14
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_0[grid(4)](buf15, primals_9,
primals_8, buf16, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf17 = extern_kernels.convolution(buf13, buf16, stride=(1,),
padding=(6,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf17, (4, 4, 10), (40, 10, 1))
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf19 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_leaky_relu_4[grid(64)](buf17, primals_10,
buf18, buf19, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf17
del primals_10
buf20 = empty_strided_cuda((4, 1, 1), (1, 4, 4), torch.float32)
buf21 = reinterpret_tensor(buf20, (4, 1, 1), (1, 1, 1), 0)
del buf20
buf22 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_per_fused__weight_norm_interface_0[grid(4)](buf21,
primals_12, primals_11, buf22, 4, 16, XBLOCK=1, num_warps=2,
num_stages=1)
buf23 = extern_kernels.convolution(buf19, buf22, stride=(1,),
padding=(6,), dilation=(2,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf23, (4, 4, 10), (40, 10, 1))
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clone_leaky_relu_5[grid(16, 4)](buf23,
primals_13, buf13, buf24, buf25, buf26, 16, 4, XBLOCK=4, YBLOCK
=16, num_warps=1, num_stages=1)
del buf23
del primals_13
buf27 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_14, (4, 2), (1, 4), 0), out=buf27)
buf28 = reinterpret_tensor(buf27, (4, 4, 2), (8, 2, 1), 0)
del buf27
triton_poi_fused_add_6[grid(32)](buf28, primals_15, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_15
buf30 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_17, reinterpret_tensor(buf28, (16, 2),
(2, 1), 0), reinterpret_tensor(primals_16, (2, 1), (1, 2), 0),
alpha=1, beta=1, out=buf30)
del primals_17
buf31 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_19, buf30, reinterpret_tensor(
primals_18, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf31)
del primals_19
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_7[grid(64)](buf31, buf32, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf33 = reinterpret_tensor(buf31, (4, 4, 4), (16, 4, 1), 0)
del buf31
triton_poi_fused__softmax_8[grid(64)](buf32, buf33, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf32
return (reinterpret_tensor(buf33, (4, 4, 4), (16, 1, 4), 0), buf2, buf9,
buf16, buf22, primals_2, primals_3, primals_5, primals_6, primals_8,
primals_9, primals_11, primals_12, reinterpret_tensor(primals_1, (4,
4, 4), (16, 1, 4), 0), buf1, buf2, buf5, buf6, buf8, buf9, buf11,
buf12, buf13, buf15, buf16, buf18, buf19, buf21, buf22, buf24,
buf25, reinterpret_tensor(buf26, (16, 4), (4, 1), 0),
reinterpret_tensor(buf28, (16, 2), (2, 1), 0), buf30, buf33,
primals_18, primals_16, primals_14)
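# NOTE (editorial, hedged): the classes below restate the original model; TCN_SLIDNew at the
# end keeps the same constructor but replaces forward() with a thin wrapper that gathers the
# module's parameters into the positional primals list expected by call() and returns only
# the first element of its output tuple (the softmax probabilities).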
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x, pad_right=True):
return x[:, :, :-self.chomp_size].contiguous() if pad_right else x[
:, :, self.chomp_size:]
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
padding, dropout):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
kernel_size, stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.
dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size, stride, dropout):
super(TemporalConvNet, self).__init__()
self.network = nn.Sequential()
for i, nch in enumerate(num_channels):
dilation = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
padding = (kernel_size - 1) * dilation
self.network.add_module('tblock' + str(i), TemporalBlock(
n_inputs=in_channels, n_outputs=out_channels, kernel_size=
kernel_size, stride=stride, dilation=dilation, padding=
padding, dropout=dropout))
def forward(self, x):
return self.network(x)
class TCN_SLIDNew(nn.Module):
def __init__(self, size_in, size_out, list_conv_depths, size_kernel,
stride, dropout):
super(TCN_SLIDNew, self).__init__()
self.tcn = TemporalConvNet(num_inputs=size_in, num_channels=
list_conv_depths, kernel_size=size_kernel, stride=stride,
dropout=dropout)
self.linear1 = nn.Linear(list_conv_depths[-1], list_conv_depths[-1] //
2)
self.linear2 = nn.Linear(list_conv_depths[-1] // 2,
list_conv_depths[-1] // 4)
self.linear3 = nn.Linear(list_conv_depths[-1] // 4, size_out)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0):
primals_4 = self.tcn.network.tblock0.conv1.bias
primals_2 = self.tcn.network.tblock0.conv1.weight_g
primals_1 = self.tcn.network.tblock0.conv1.weight_v
primals_7 = self.tcn.network.tblock0.conv2.bias
primals_5 = self.tcn.network.tblock0.conv2.weight_g
primals_3 = self.tcn.network.tblock0.conv2.weight_v
primals_10 = self.tcn.network.tblock1.conv1.bias
primals_8 = self.tcn.network.tblock1.conv1.weight_g
primals_6 = self.tcn.network.tblock1.conv1.weight_v
primals_13 = self.tcn.network.tblock1.conv2.bias
primals_11 = self.tcn.network.tblock1.conv2.weight_g
primals_9 = self.tcn.network.tblock1.conv2.weight_v
primals_14 = self.linear1.weight
primals_15 = self.linear1.bias
primals_16 = self.linear2.weight
primals_17 = self.linear2.bias
primals_18 = self.linear3.weight
primals_19 = self.linear3.bias
primals_12 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
| ddcas/singing-language-identification | TCN_SLID | false | 1,843 | [
"MIT"
] | 0 | d104419b196d56d4de37cff47c32e88e28c58690 | https://github.com/ddcas/singing-language-identification/tree/d104419b196d56d4de37cff47c32e88e28c58690 | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super().__init__()
self.chomp_size = chomp_size
def forward(self, x, pad_right=True):
return x[:, :, :-self.chomp_size].contiguous() if pad_right else x[
:, :, self.chomp_size:]
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation,
padding, dropout):
super().__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs,
kernel_size, stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.
dropout1, self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1
) if n_inputs != n_outputs else None
self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
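        # Residual connection: add the (optionally 1x1-projected) input to the
        # output of the causal conv stack, then apply LeakyReLU.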
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size, stride, dropout):
super().__init__()
self.network = nn.Sequential()
for i, nch in enumerate(num_channels):
dilation = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
padding = (kernel_size - 1) * dilation
self.network.add_module('tblock' + str(i), TemporalBlock(
n_inputs=in_channels, n_outputs=out_channels, kernel_size=
kernel_size, stride=stride, dilation=dilation, padding=
padding, dropout=dropout))
def forward(self, x):
return self.network(x)
class Model(nn.Module):
def __init__(self, size_in, size_out, list_conv_depths, size_kernel,
stride, dropout):
super().__init__()
self.tcn = TemporalConvNet(num_inputs=size_in, num_channels=
list_conv_depths, kernel_size=size_kernel, stride=stride,
dropout=dropout)
self.linear1 = nn.Linear(list_conv_depths[-1], list_conv_depths[-1] //
2)
self.linear2 = nn.Linear(list_conv_depths[-1] // 2,
list_conv_depths[-1] // 4)
self.linear3 = nn.Linear(list_conv_depths[-1] // 4, size_out)
self.softmax = nn.Softmax(dim=2)
def forward(self, x):
output = self.tcn(x.transpose(1, 2))
output = self.linear1(output.transpose(1, 2))
output = self.linear2(output)
output = self.linear3(output)
output = self.softmax(output)
return output.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'size_in': 4, 'size_out': 4, 'list_conv_depths': [4, 4],
'size_kernel': 4, 'stride': 1, 'dropout': 0.5}]
|
HearthstoneNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gk/cgkeox2n6lb2433ix673nzbipppptbujeertkjua6x5dbykrv7hm.py
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/gq/cgqvpghyzwkbypvbvlyig6ohuplw6qvhn73hkwj6auj5e2m5mqio.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/d4/cd4s5ogbgu46xbdaa3oicwxi7l6pnddrap26pxiqzcpei77ta53h.py
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/tf/ctf2r7eigqwkdodsifmawrdg3d2tjzbdky4jzlpz3pwqkaejrzil.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_10 => relu_6
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/jr/cjra477igxtot4bhv5bzdyliqhrsl24s254uusqxosnd22fcfkwp.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# x_12 => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_6 = async_compile.triton('triton_per_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_5, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_5, 262144, grid=grid(262144), stream=stream0)
buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 16, 16), (16384, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_5, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.avg_pool2d]
buf16 = torch.ops.aten.avg_pool2d.default(buf15, [7, 7], [7, 7], [0, 0], False, True, None)
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf17, (16, 64), (64, 1), 0), reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf19, primals_7, 1024, grid=grid(1024), stream=stream0)
del primals_7
buf20 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf19, reinterpret_tensor(primals_8, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf20)
del primals_9
buf23 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_6.run(buf20, buf23, 16, 10, grid=grid(16), stream=stream0)
del buf20
return (buf23, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, reinterpret_tensor(buf17, (16, 64), (64, 1), 0), buf19, buf23, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class HearthstoneNet(nn.Module):
def __init__(self):
super(HearthstoneNet, self).__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), padding=1)
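        # conv2 is reused for every subsequent conv layer, so those layers share weights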
self.conv2 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=1)
self.max_pool = nn.MaxPool2d(2, 2)
self.global_pool = nn.AvgPool2d(7)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.max_pool(x)
x = F.relu(self.conv2(x))
x = F.relu(self.conv2(x))
x = self.max_pool(x)
x = F.relu(self.conv2(x))
x = F.relu(self.conv2(x))
x = self.global_pool(x)
x = x.view(-1, 64)
x = F.relu(self.fc1(x))
x = self.fc2(x)
        x = F.log_softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
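# Fused epilogue for the full-resolution convolutions: adds the per-channel bias
# (64 channels, broadcast over the 64x64 spatial map) and applies ReLU in place.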
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
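# 2x2, stride-2 max pool over the 64x64 maps: keeps the maximum of each window
# and records which of the four positions won as an int8 index (the indices
# output of max_pool2d_with_indices).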
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
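# Bias-add + ReLU for the fc1 output: the (16, 64) matmul result is updated in
# place with the per-feature bias.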
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
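# Numerically stable log-softmax over the 10 class logits of each of the 16
# rows: subtract the row max, exponentiate, sum, and subtract the log of the sum.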
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (64, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4,
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(262144)](buf7, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(262144)](buf9, primals_5,
262144, XBLOCK=512, num_warps=8, num_stages=1)
buf10 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(65536)](buf9, buf10,
buf11, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 64, 16, 16), (16384, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(65536)](buf13, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(65536)](buf15, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf16 = torch.ops.aten.avg_pool2d.default(buf15, [7, 7], [7, 7], [0,
0], False, True, None)
buf17 = buf16
del buf16
buf18 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf17, (16, 64), (64, 1), 0),
reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_relu_5[grid(1024)](buf19, primals_7, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf20 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf19, reinterpret_tensor(primals_8,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf20)
del primals_9
buf23 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(16)](buf20, buf23, 16, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf20
return (buf23, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf5,
buf7, buf9, buf10, buf11, buf13, buf15, reinterpret_tensor(buf17, (
16, 64), (64, 1), 0), buf19, buf23, primals_8, primals_6)
class HearthstoneNetNew(nn.Module):
def __init__(self):
super(HearthstoneNetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), padding=1)
self.conv2 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=1)
self.max_pool = nn.MaxPool2d(2, 2)
self.global_pool = nn.AvgPool2d(7)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| dianarvp/stone_ground_hearth_battles | HearthstoneNet | false | 1,844 | [
"Apache-2.0"
] | 0 | 450e70eaef21b543be579a6d696676fb148a99b0 | https://github.com/dianarvp/stone_ground_hearth_battles/tree/450e70eaef21b543be579a6d696676fb148a99b0 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), padding=1)
self.conv2 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=1)
self.max_pool = nn.MaxPool2d(2, 2)
self.global_pool = nn.AvgPool2d(7)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.max_pool(x)
x = F.relu(self.conv2(x))
x = F.relu(self.conv2(x))
x = self.max_pool(x)
x = F.relu(self.conv2(x))
x = F.relu(self.conv2(x))
x = self.global_pool(x)
x = x.view(-1, 64)
x = F.relu(self.fc1(x))
x = self.fc2(x)
        x = F.log_softmax(x, dim=1)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
AttnBertPooler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/fx/cfxreppf45syanwafc3k3lifezkx2ekug2kzigzyzfpugesb4ddv.py
# Topologically Sorted Source Nodes: [scores, attn_token_tensor], Original ATen: [aten.div, aten.bmm]
# Source node to ATen node mapping:
# attn_token_tensor => bmm_1
# scores => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 2.0), kwargs = {})
# %bmm_1 : [num_users=1] = call_function[target=torch.ops.aten.bmm.default](args = (%expand_2, %expand_3), kwargs = {})
triton_poi_fused_bmm_div_0 = async_compile.triton('triton_poi_fused_bmm_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_bmm_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_bmm_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/yp/cypbyz3kamya2mpx5pfq3nuyc3xjsjghcettcl56hvfse3xrpddb.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.div]
# Source node to ATen node mapping:
# scores => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, 2.0), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/qy/cqyuje6oau63esfxkwa5avylwvoxv5irn3nl4odupvuzzzonuw3x.py
# Topologically Sorted Source Nodes: [pooled_token_tensor], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# pooled_token_tensor => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%view_8, %squeeze], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((16*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/4c/c4ccgi6tfqeit7hozfgiiyzjvq4uffp4epyfj3akusm4vki5mmcc.py
# Topologically Sorted Source Nodes: [pooled_output_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# pooled_output_1 => tanh
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_tanh_3 = async_compile.triton('triton_poi_fused_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 1), (3, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 3, 4), (16, 4, 1), 4), reinterpret_tensor(primals_1, (4, 4, 1), (16, 1, 0), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores, attn_token_tensor], Original ATen: [aten.div, aten.bmm]
stream0 = get_raw_stream(0)
triton_poi_fused_bmm_div_0.run(primals_1, buf1, 48, grid=grid(48), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 3, 1), (3, 1, 12), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf2, 12, grid=grid(12), stream=stream0)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores, attn_token_tensor], Original ATen: [aten.div, aten.bmm]
extern_kernels.bmm(buf1, buf2, out=buf3)
del buf1
del buf2
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_token_tensor], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 32, grid=grid(32), stream=stream0)
del buf3
del primals_1
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (8, 8), (1, 8), 0), out=buf5)
del primals_2
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [pooled_output_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_3.run(buf6, primals_3, 32, grid=grid(32), stream=stream0)
del primals_3
return (buf6, buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class AttnBertPooler(nn.Module):
def __init__(self, config):
super(AttnBertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size * 2)
self.activation = nn.Tanh()
self.hidden_size = config.hidden_size
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0].view(len(hidden_states), -1, 1
)
scores = torch.matmul(hidden_states[:, 1:], first_token_tensor
) / math.sqrt(self.hidden_size)
attn_token_tensor = torch.matmul(hidden_states[:, 1:].view(
hidden_states.size(0), self.hidden_size, -1), scores)
attn_token_tensor = attn_token_tensor.view(attn_token_tensor.size(0
), self.hidden_size)
first_token_tensor = first_token_tensor.squeeze(2)
pooled_token_tensor = torch.cat((attn_token_tensor,
first_token_tensor), dim=-1)
pooled_output = self.dense(pooled_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
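# Illustrative usage sketch (an editorial addition, not part of the original
# module): it assumes only the AttnBertPooler class defined above and uses
# types.SimpleNamespace as a stand-in for the _mock_config helper. With a
# (batch, seq_len, hidden) = (4, 4, 4) input, the pooler concatenates the
# attention-weighted summary of tokens 1..L-1 with the first-token vector, so
# the output has shape (batch, 2 * hidden) = (4, 8).
import types
if __name__ == '__main__':
    cfg = types.SimpleNamespace(hidden_size=4)
    pooler = AttnBertPooler(cfg)
    hidden = torch.rand(4, 4, 4)
    print(pooler(hidden).shape)  # torch.Size([4, 8])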
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_bmm_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 48
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (16 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (8, 8), (8, 1))
assert_size_stride(primals_3, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 1), (3, 1, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 3, 4), (16, 4,
1), 4), reinterpret_tensor(primals_1, (4, 4, 1), (16, 1, 0), 0),
out=buf0)
buf1 = empty_strided_cuda((4, 4, 3), (12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_bmm_div_0[grid(48)](primals_1, buf1, 48, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 3, 1), (3, 1, 12), 0)
del buf0
triton_poi_fused_div_1[grid(12)](buf2, 12, XBLOCK=16, num_warps=1,
num_stages=1)
buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf1, buf2, out=buf3)
del buf1
del buf2
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
triton_poi_fused_cat_2[grid(32)](buf3, primals_1, buf4, 32, XBLOCK=
32, num_warps=1, num_stages=1)
del buf3
del primals_1
buf5 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_2, (8, 8), (1, 8
), 0), out=buf5)
del primals_2
buf6 = buf5
del buf5
triton_poi_fused_tanh_3[grid(32)](buf6, primals_3, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_3
return buf6, buf4, buf6
class AttnBertPoolerNew(nn.Module):
def __init__(self, config):
super(AttnBertPoolerNew, self).__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size * 2)
self.activation = nn.Tanh()
self.hidden_size = config.hidden_size
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| AnonymousAuthor2013/PostRec | AttnBertPooler | false | 1,845 | [
"MIT"
] | 0 | a1461f716d177e28b96ca29d1398f96b5717c1e1 | https://github.com/AnonymousAuthor2013/PostRec/tree/a1461f716d177e28b96ca29d1398f96b5717c1e1 | from _paritybench_helpers import _mock_config
import math
import torch
from torch import nn
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size * 2)
self.activation = nn.Tanh()
self.hidden_size = config.hidden_size
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0].view(len(hidden_states), -1, 1
)
scores = torch.matmul(hidden_states[:, 1:], first_token_tensor
) / math.sqrt(self.hidden_size)
attn_token_tensor = torch.matmul(hidden_states[:, 1:].view(
hidden_states.size(0), self.hidden_size, -1), scores)
attn_token_tensor = attn_token_tensor.view(attn_token_tensor.size(0
), self.hidden_size)
first_token_tensor = first_token_tensor.squeeze(2)
pooled_token_tensor = torch.cat((attn_token_tensor,
first_token_tensor), dim=-1)
pooled_output = self.dense(pooled_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return []
|
TestNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/j5/cj55xitdozjbjezyzqxiireroieqwfmjmm2hic7w7stgzy2ofejl.py
# Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# x => mul
# x_1 => mul_1
# x_2 => mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_4), kwargs = {})
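# In plain terms (editorial note, derived from the graph fragment above): the
# three successive ScaleLayer multiplications in TestNet are fused into one
# pointwise kernel that computes out = x * scale1 * scale2 * scale3 in a single
# pass over the 256-element input.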
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp6 = tmp3 * tmp5
tmp9 = tmp6 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1, x_2], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_1, primals_3, primals_4, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ScaleLayer(nn.Module):
def __init__(self, init_value=0.001):
super().__init__()
self.scale = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input):
return input * self.scale
class TestNet(nn.Module):
def __init__(self):
super(TestNet, self).__init__()
self.scaler1 = ScaleLayer(init_value=torch.tensor(2.0))
self.scaler2 = ScaleLayer(init_value=torch.tensor(2.0))
self.scaler3 = ScaleLayer(init_value=torch.tensor(2.0))
def forward(self, x):
x = self.scaler1(x)
x = self.scaler2(x)
x = self.scaler3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
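# Quick sanity check (an editorial addition, not part of the original file):
# three chained ScaleLayers initialised with 2.0 simply scale the input by
# 2.0 * 2.0 * 2.0 = 8.0, which is exactly what the single fused multiply kernel
# above computes in one pointwise pass.
if __name__ == '__main__':
    net = TestNet()
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(net(x), x * 8.0)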
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr3 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp6 = tmp3 * tmp5
tmp9 = tmp6 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_2, primals_1, primals_3,
primals_4, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class ScaleLayer(nn.Module):
def __init__(self, init_value=0.001):
super().__init__()
self.scale = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input):
return input * self.scale
class TestNetNew(nn.Module):
def __init__(self):
super(TestNetNew, self).__init__()
self.scaler1 = ScaleLayer(init_value=torch.tensor(2.0))
self.scaler2 = ScaleLayer(init_value=torch.tensor(2.0))
self.scaler3 = ScaleLayer(init_value=torch.tensor(2.0))
def forward(self, input_0):
primals_1 = self.scaler1.scale
primals_3 = self.scaler2.scale
primals_4 = self.scaler3.scale
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| dizzyvn/torch-tcav | TestNet | false | 1,846 | [
"Apache-2.0"
] | 0 | c9795e817d1104923ef7422f5575607e6b835abc | https://github.com/dizzyvn/torch-tcav/tree/c9795e817d1104923ef7422f5575607e6b835abc | import torch
import torch.nn as nn
class ScaleLayer(nn.Module):
def __init__(self, init_value=0.001):
super().__init__()
self.scale = nn.Parameter(torch.FloatTensor([init_value]))
def forward(self, input):
return input * self.scale
class Model(nn.Module):
def __init__(self):
super().__init__()
self.scaler1 = ScaleLayer(init_value=torch.tensor(2.0))
self.scaler2 = ScaleLayer(init_value=torch.tensor(2.0))
self.scaler3 = ScaleLayer(init_value=torch.tensor(2.0))
def forward(self, x):
x = self.scaler1(x)
x = self.scaler2(x)
x = self.scaler3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BertPooler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# pooled_output => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/2g/c2gw7362i2a6wsfdx2sxyywx4o6ronjg6goebvdn44w6gpjsxpbc.py
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# pooled_output => add
# pooled_output_1 => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4)}]
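# Illustrative check (an editorial addition, not part of the original module;
# types.SimpleNamespace stands in for the _mock_config helper): the pooler only
# reads the first ("[CLS]") token of each sequence, so its output equals the
# dense layer followed by tanh applied to hidden_states[:, 0] directly.
import types
if __name__ == '__main__':
    pooler = BertPooler(types.SimpleNamespace(hidden_size=4))
    h = torch.rand(4, 4, 4, 4)
    assert torch.allclose(pooler(h), torch.tanh(pooler.dense(h[:, 0])))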
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2
class BertPoolerNew(nn.Module):
def __init__(self, config):
super(BertPoolerNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Andr3wis2Cool4School/AI-pro | BertPooler | false | 1,847 | [
"MIT"
] | 0 | dfe5f5959bc187d899a86f13b84158c66f64d1cc | https://github.com/Andr3wis2Cool4School/AI-pro/tree/dfe5f5959bc187d899a86f13b84158c66f64d1cc | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BatchNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/kt/cktr2fi4oyvatsyylkr2wbxli7afdvnkso4duaghvvhtpd2q6ygy.py
# Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, var, log, sum_1, logdet], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.log, aten.sum, aten.mul]
# Source node to ATen node mapping:
# log => log
# logdet => mul
# mean => mean
# mean_1 => mean_1
# pow_1 => pow_1
# sub => sub
# sum_1 => sum_1
# var => add
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0]), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-05), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%log,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {})
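# Spelled out (editorial summary of the graph fragment above): for a batch x
# with leading dimension N the fused kernel computes, per feature position j,
#     mean_j = (1/N) * sum_i x[i, j]
#     var_j  = (1/N) * sum_i (x[i, j] - mean_j)**2 + eps      (eps = 1e-05)
#     logdet = -0.5 * sum_j log(var_j)
# which matches the BatchNorm.forward implementation further below.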
triton_per_fused_add_log_mean_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = -0.5
tmp28 = tmp26 * tmp27
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp8, None)
tl.store(out_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp22, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/xm/cxmt6duqqeteu4xwsym3p3vmedzj7nayhm62z7v7nl2hxjisngyy.py
# Topologically Sorted Source Nodes: [sub_1, sqrt, u], Original ATen: [aten.sub, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# sqrt => sqrt
# sub_1 => sub_1
# u => div
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %sqrt), kwargs = {})
triton_poi_fused_div_sqrt_sub_1 = async_compile.triton('triton_poi_fused_div_sqrt_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sqrt_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = libdevice.sqrt(tmp3)
tmp5 = tmp2 / tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [mean, sub, pow_1, mean_1, var, log, sum_1, logdet], Original ATen: [aten.mean, aten.sub, aten.pow, aten.add, aten.log, aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mean_mul_pow_sub_sum_0.run(buf4, arg0_1, buf0, buf1, 1, 64, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub_1, sqrt, u], Original ATen: [aten.sub, aten.sqrt, aten.div]
triton_poi_fused_div_sqrt_sub_1.run(arg0_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf2, buf4, buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import tensor
import torch.nn as nn
import numpy.random as rng
import logging
logger = logging.getLogger(__name__)
class BaseFlow(nn.Module):
""" """
def __init__(self, n_inputs, **kwargs):
super(BaseFlow, self).__init__()
self.n_inputs = n_inputs
def forward(self, x, **kwargs):
raise NotImplementedError
def generate_samples(self, n_samples=1, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, x, **kwargs):
""" Calculates log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(x, **kwargs)
constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1
) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, x, **kwargs):
""" Calculates log p(x) and t(x) with a Gaussian base density """
u, log_likelihood = self.log_likelihood(x, **kwargs)
return u, log_likelihood, None
class BatchNorm(BaseFlow):
"""BatchNorm implementation"""
def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
super(BatchNorm, self).__init__(n_inputs)
self.n_inputs = n_inputs
self.alpha = alpha
self.eps = eps
self.calculated_running_mean = False
self.running_mean = torch.zeros(self.n_inputs)
self.running_var = torch.zeros(self.n_inputs)
def forward(self, x, fixed_params=False):
"""Calculates x -> u(x) (batch norming)"""
if fixed_params:
mean = self.running_mean
var = self.running_var
else:
mean = torch.mean(x, dim=0)
var = torch.mean((x - mean) ** 2, dim=0) + self.eps
if not self.calculated_running_mean:
self.running_mean = mean
self.running_var = var
else:
self.running_mean = (1.0 - self.alpha
) * self.running_mean + self.alpha * mean
self.running_var = (1.0 - self.alpha
) * self.running_var + self.alpha * var
self.calculated_running_mean = True
u = (x - mean) / torch.sqrt(var)
logdet = -0.5 * torch.sum(torch.log(var))
return u, logdet
def inverse(self, u):
"""Calculates u -> x(u) (the approximate inverse transformation based on running mean and variance)"""
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
def generate_samples(self, n_samples=1, u=None, **kwargs):
if u is None:
u = tensor(rng.randn(n_samples, self.n_inputs))
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
    def to(self, *args, **kwargs):
        logger.debug('Transforming BatchNorm to %s', args)
        # Move the module itself, then the running statistics, which are plain
        # tensors rather than registered buffers and are not moved automatically.
        self = super().to(*args, **kwargs)
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
        return self
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_inputs': 4}]
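# Illustrative numerical check (an editorial addition, not part of the original
# module): the forward pass is plain batch normalisation over dim 0 without
# affine parameters, and logdet is -0.5 * sum(log(var + eps)).
if __name__ == '__main__':
    bn = BatchNorm(n_inputs=4)
    x = torch.rand(8, 4)
    u, logdet = bn(x)
    var = x.var(dim=0, unbiased=False) + bn.eps
    assert torch.allclose(u, (x - x.mean(dim=0)) / var.sqrt(), atol=1e-06)
    assert torch.allclose(logdet, -0.5 * var.log().sum(), atol=1e-06)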
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
from torch import tensor
import torch.nn as nn
import numpy.random as rng
import logging
logger = logging.getLogger(__name__)
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = tl_math.log(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = -0.5
tmp28 = tmp26 * tmp27
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp8, None)
tl.store(out_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp22, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp28, None)
@triton.jit
def triton_poi_fused_div_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = libdevice.sqrt(tmp3)
tmp5 = tmp2 / tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_pow_sub_sum_0[grid(1)](buf4,
arg0_1, buf0, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_div_sqrt_sub_1[grid(256)](arg0_1, buf0, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf2, buf4, buf1, buf0
class BaseFlow(nn.Module):
""" """
def __init__(self, n_inputs, **kwargs):
super(BaseFlow, self).__init__()
self.n_inputs = n_inputs
def forward(self, x, **kwargs):
raise NotImplementedError
def generate_samples(self, n_samples=1, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, x, **kwargs):
""" Calculates log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(x, **kwargs)
constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1
) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, x, **kwargs):
""" Calculates log p(x) and t(x) with a Gaussian base density """
u, log_likelihood = self.log_likelihood(x, **kwargs)
return u, log_likelihood, None
class BatchNormNew(BaseFlow):
"""BatchNorm implementation"""
def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
super(BatchNormNew, self).__init__(n_inputs)
self.n_inputs = n_inputs
self.alpha = alpha
self.eps = eps
self.calculated_running_mean = False
self.running_mean = torch.zeros(self.n_inputs)
self.running_var = torch.zeros(self.n_inputs)
def inverse(self, u):
"""Calculates u -> x(u) (the approximate inverse transformation based on running mean and variance)"""
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
def generate_samples(self, n_samples=1, u=None, **kwargs):
if u is None:
u = tensor(rng.randn(n_samples, self.n_inputs))
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
    def to(self, *args, **kwargs):
        logger.debug('Transforming BatchNorm to %s', args)
        # Move the module itself, then the running statistics, which are plain
        # tensors rather than registered buffers and are not moved automatically.
        self = super().to(*args, **kwargs)
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
        return self
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| dlvp/madminer | BatchNorm | false | 1,848 | [
"MIT"
] | 0 | 4ae7d9b73452848a6c9d1b81b50ef316ff7a054f | https://github.com/dlvp/madminer/tree/4ae7d9b73452848a6c9d1b81b50ef316ff7a054f | import torch
import numpy as np
from torch import tensor
import torch.nn as nn
import numpy.random as rng
import logging
logger = logging.getLogger(__name__)
class BaseFlow(nn.Module):
""" """
def __init__(self, n_inputs, **kwargs):
super().__init__()
self.n_inputs = n_inputs
def forward(self, x, **kwargs):
raise NotImplementedError
def generate_samples(self, n_samples=1, u=None, **kwargs):
raise NotImplementedError
def log_likelihood(self, x, **kwargs):
""" Calculates log p(x) with a Gaussian base density """
u, logdet_dudx = self.forward(x, **kwargs)
constant = float(-0.5 * self.n_inputs * np.log(2.0 * np.pi))
log_likelihood = constant - 0.5 * torch.sum(u ** 2, dim=1
) + logdet_dudx
return u, log_likelihood
def log_likelihood_and_score(self, x, **kwargs):
""" Calculates log p(x) and t(x) with a Gaussian base density """
u, log_likelihood = self.log_likelihood(x, **kwargs)
return u, log_likelihood, None
class Model(BaseFlow):
"""BatchNorm implementation"""
def __init__(self, n_inputs, alpha=0.1, eps=1e-05):
super().__init__(n_inputs)
self.n_inputs = n_inputs
self.alpha = alpha
self.eps = eps
self.calculated_running_mean = False
self.running_mean = torch.zeros(self.n_inputs)
self.running_var = torch.zeros(self.n_inputs)
def forward(self, x, fixed_params=False):
"""Calculates x -> u(x) (batch norming)"""
if fixed_params:
mean = self.running_mean
var = self.running_var
else:
mean = torch.mean(x, dim=0)
var = torch.mean((x - mean) ** 2, dim=0) + self.eps
if not self.calculated_running_mean:
self.running_mean = mean
self.running_var = var
else:
self.running_mean = (1.0 - self.alpha
) * self.running_mean + self.alpha * mean
self.running_var = (1.0 - self.alpha
) * self.running_var + self.alpha * var
self.calculated_running_mean = True
u = (x - mean) / torch.sqrt(var)
logdet = -0.5 * torch.sum(torch.log(var))
return u, logdet
def inverse(self, u):
"""Calculates u -> x(u) (the approximate inverse transformation based on running mean and variance)"""
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
def generate_samples(self, n_samples=1, u=None, **kwargs):
if u is None:
u = tensor(rng.randn(n_samples, self.n_inputs))
x = torch.sqrt(self.running_var) * u + self.running_mean
return x
    def to(self, *args, **kwargs):
        logger.debug('Transforming BatchNorm to %s', args)
        # Move the module itself, then the running statistics, which are plain
        # tensors rather than registered buffers and are not moved automatically.
        self = super().to(*args, **kwargs)
        self.running_mean = self.running_mean.to(*args, **kwargs)
        self.running_var = self.running_var.to(*args, **kwargs)
        return self
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
return (reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, num_inputs, args):
super(Critic, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
x = F.tanh(self.fc1(x))
x = F.tanh(self.fc2(x))
v = self.fc3(x)
return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_inputs': 4, 'args': _mock_config(hidden_size=4)}]
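# Minimal usage sketch (an editorial addition, not part of the original repo;
# types.SimpleNamespace stands in for the _mock_config helper): the critic is a
# two-hidden-layer tanh MLP mapping each state vector to a scalar value, so a
# (batch, num_inputs) input yields a (batch, 1) output.
import types
if __name__ == '__main__':
    critic = Critic(num_inputs=4, args=types.SimpleNamespace(hidden_size=4))
    values = critic(torch.rand(16, 4))
    print(values.shape)  # torch.Size([16, 1])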
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
return reinterpret_tensor(buf5, (4, 4, 4, 1), (16, 4, 1, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, buf3, primals_6, primals_4
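# Note: call() returns the value prediction first, reshaped to (4, 4, 4, 1);
# the remaining tensors (flattened input, tanh activations, fc3/fc2 weights)
# appear to be kept for the backward pass, and CriticNew.forward below only
# returns output[0].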
class CriticNew(nn.Module):
def __init__(self, num_inputs, args):
super(CriticNew, self).__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| dlrudco/pg_travel | Critic | false | 1,849 | [
"MIT"
] | 0 | 33733b624894095096af8201f7597c3244d3480d | https://github.com/dlrudco/pg_travel/tree/33733b624894095096af8201f7597c3244d3480d | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, num_inputs, args):
super().__init__()
self.fc1 = nn.Linear(num_inputs, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
self.fc3.weight.data.mul_(0.1)
self.fc3.bias.data.mul_(0.0)
def forward(self, x):
x = F.tanh(self.fc1(x))
x = F.tanh(self.fc2(x))
v = self.fc3(x)
return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MetapathAggrLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/sh/cshiy6ror5kro3kxqkozos64qydforzfucwulfa4fixxidkn2v3i.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone, aten._unsafe_view]
# Source node to ATen node mapping:
# matmul => clone, view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [16, 4]), kwargs = {})
triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*(x1 // 4)) + (16*(x1 % 4))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/3c/c3cwkjtzzomj32jcievan3nbtpp4jqymlx7ihukipk6qstr4u5de.py
# Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.leaky_relu, aten._softmax]
# Source node to ATen node mapping:
# e => gt, mul, where
# e_1 => amax, exp, sub
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_leaky_relu_1 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 4.0
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp7 = tmp6 > tmp1
tmp8 = tmp6 * tmp3
tmp9 = tl.where(tmp7, tmp6, tmp8)
tmp11 = tmp10 > tmp1
tmp12 = tmp10 * tmp3
tmp13 = tl.where(tmp11, tmp10, tmp12)
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 > tmp1
tmp17 = tmp15 * tmp3
tmp18 = tl.where(tmp16, tmp15, tmp17)
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp21 = tmp20 > tmp1
tmp22 = tmp20 * tmp3
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = triton_helpers.maximum(tmp19, tmp23)
tmp25 = tmp5 - tmp24
tmp26 = tl_math.exp(tmp25)
tl.store(out_ptr0 + (x2), tmp26, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [e_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# e_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=4] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/kv/ckvabzp2kmk5yogvle2ldzyh22j2mfzux3cx44xcqffx4p7ydutr.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# output => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_1, %unsqueeze_3, %unsqueeze_5, %unsqueeze_7],), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x0), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone, aten._unsafe_view]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.leaky_relu, aten._softmax]
triton_poi_fused__softmax_leaky_relu_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [e_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
del buf2
buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), out=buf4)
buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 4), reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), out=buf5)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 8), reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), out=buf6)
buf7 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 12), reinterpret_tensor(primals_1, (4, 4), (16, 1), 12), out=buf7)
buf8 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf4, buf5, buf6, buf7, buf8, 16, grid=grid(16), stream=stream0)
del buf4
del buf5
del buf6
del buf7
return (buf8, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12), reinterpret_tensor(primals_1, (4, 4), (1, 16), 8), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4), reinterpret_tensor(primals_1, (4, 4), (1, 16), 0), reinterpret_tensor(buf0, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import functional as F
from torch import nn
class MetapathAggrLayer(nn.Module):
"""
metapath attention layer.
"""
def __init__(self, in_features, nmeta, dropout, alpha):
super(MetapathAggrLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.alpha = alpha
self.n_meta = nmeta
self.a = nn.Parameter(torch.zeros(size=(in_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input):
input = input.transpose(0, 1)
N = input.size()[0]
a_input = input
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
e = F.softmax(e, dim=1)
output = [torch.matmul(e[i], input[i]).unsqueeze(0) for i in range(N)]
output = torch.cat(output)
return output
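    # Shape sketch (assuming the (4, 4, 4) test input from get_inputs): the
    # transposed input is scored against the learned vector `a`, giving a
    # (4, 4) score matrix; softmax over dim 1 turns each row into weights, and
    # each matmul(e[i], input[i]) is a weighted sum of rows, so the
    # concatenated output is (4, 4).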
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'nmeta': 4, 'dropout': 0.5, 'alpha': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x1 // 4) + 16 * (x1 % 4)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
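# Note: the copy kernel above appears to materialize input.transpose(0, 1)
# contiguously and expose it as a (16, 4) matrix, so a single extern mm with
# the (4, 1) attention vector `a` can produce all scores at once.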
@triton.jit
def triton_poi_fused__softmax_leaky_relu_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 4.0
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp7 = tmp6 > tmp1
tmp8 = tmp6 * tmp3
tmp9 = tl.where(tmp7, tmp6, tmp8)
tmp11 = tmp10 > tmp1
tmp12 = tmp10 * tmp3
tmp13 = tl.where(tmp11, tmp10, tmp12)
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 > tmp1
tmp17 = tmp15 * tmp3
tmp18 = tl.where(tmp16, tmp15, tmp17)
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp21 = tmp20 > tmp1
tmp22 = tmp20 * tmp3
tmp23 = tl.where(tmp21, tmp20, tmp22)
tmp24 = triton_helpers.maximum(tmp19, tmp23)
tmp25 = tmp5 - tmp24
tmp26 = tl_math.exp(tmp25)
tl.store(out_ptr0 + x2, tmp26, xmask)
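# Note: the kernel above fuses the LeakyReLU (the hard-coded 4.0 matches
# alpha=4 from get_init_inputs) with the numerically stable softmax numerator;
# a rough PyTorch equivalent (names assumed):
#   e = F.leaky_relu(scores, negative_slope=4.0)
#   num = torch.exp(e - e.max(dim=1, keepdim=True).values)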
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
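# Note: the kernel above divides each exponential by its row sum, completing
# the softmax along dim 1.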
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x0, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x0, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x0, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
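# Note: the cat kernel above stacks the four 1x4 attention-weighted rows
# (buf4..buf7 in call() below) into the final (4, 4) output, mirroring the
# torch.cat over the per-index matmuls in the original forward's list
# comprehension.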
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_view_clone_0[grid(64)](primals_1, buf0, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, primals_2, out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_leaky_relu_1[grid(16)](buf1, buf2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (16, 1), 0), out=buf4)
buf5 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 4),
reinterpret_tensor(primals_1, (4, 4), (16, 1), 4), out=buf5)
buf6 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 8),
reinterpret_tensor(primals_1, (4, 4), (16, 1), 8), out=buf6)
buf7 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (1, 4), (4, 1), 12),
reinterpret_tensor(primals_1, (4, 4), (16, 1), 12), out=buf7)
buf8 = buf3
del buf3
triton_poi_fused_cat_3[grid(16)](buf4, buf5, buf6, buf7, buf8, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del buf5
del buf6
del buf7
return buf8, buf1, reinterpret_tensor(primals_1, (4, 4), (1, 16), 12
), reinterpret_tensor(primals_1, (4, 4), (1, 16), 8
), reinterpret_tensor(primals_1, (4, 4), (1, 16), 4
), reinterpret_tensor(primals_1, (4, 4), (1, 16), 0
), reinterpret_tensor(buf0, (4, 16), (1, 4), 0)
class MetapathAggrLayerNew(nn.Module):
"""
metapath attention layer.
"""
def __init__(self, in_features, nmeta, dropout, alpha):
super(MetapathAggrLayerNew, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.alpha = alpha
self.n_meta = nmeta
self.a = nn.Parameter(torch.zeros(size=(in_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0):
primals_2 = self.a
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| dingdanhao110/HINGCN | MetapathAggrLayer | false | 1,850 | [
"MIT"
] | 0 | 281b73c03bd3b00e35bce4c5e1c27076233555e4 | https://github.com/dingdanhao110/HINGCN/tree/281b73c03bd3b00e35bce4c5e1c27076233555e4 | import torch
from torch.nn import functional as F
from torch import nn
class Model(nn.Module):
"""
metapath attention layer.
"""
def __init__(self, in_features, nmeta, dropout, alpha):
super().__init__()
self.dropout = dropout
self.in_features = in_features
self.alpha = alpha
self.n_meta = nmeta
self.a = nn.Parameter(torch.zeros(size=(in_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input):
input = input.transpose(0, 1)
N = input.size()[0]
a_input = input
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
e = F.softmax(e, dim=1)
output = [torch.matmul(e[i], input[i]).unsqueeze(0) for i in range(N)]
output = torch.cat(output)
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5, 4]
|
SoftDiceLossSquared | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_6/inductor_cache/3o/c3oqffj4mx4zijxuxjjrpiwlcprgvoj5jv7btywkbjnturqawb4x.py
# Topologically Sorted Source Nodes: [intersect, sum_1, pow_1, pow_2, denominator, sum_2], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
# Source node to ATen node mapping:
# denominator => add
# intersect => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2, 3]), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [2, 3]), kwargs = {})
triton_per_fused_add_mul_pow_sum_0 = async_compile.triton('triton_per_fused_add_mul_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_6/inductor_cache/mt/cmtyese7mqn26fs2flzjcxkq5uwnszbzc5rdxm4ynytrwkyqb2qk.py
# Topologically Sorted Source Nodes: [intersect_1, mul_1, denominator_1, dc, dc_1, neg], Original ATen: [aten.add, aten.mul, aten.div, aten.mean, aten.neg]
# Source node to ATen node mapping:
# dc => div
# dc_1 => mean
# denominator_1 => add_2
# intersect_1 => add_1
# mul_1 => mul_1
# neg => neg
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
triton_per_fused_add_div_mean_mul_neg_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_neg_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tmp4 / tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = 16.0
tmp12 = tmp10 / tmp11
tmp13 = -tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp13, None)
''', device_str='cuda')
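# Note: taken together, the two kernels above appear to evaluate
#   loss = -mean_{b,c}( 2 * (sum_hw x*y + 1) / (sum_hw (x^2 + y^2) + 1) ),
# i.e. the squared-denominator soft Dice with smooth = 1.0.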
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [intersect, sum_1, pow_1, pow_2, denominator, sum_2], Original ATen: [aten.mul, aten.sum, aten.pow, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_pow_sum_0.run(arg0_1, arg1_1, buf0, buf1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [intersect_1, mul_1, denominator_1, dc, dc_1, neg], Original ATen: [aten.add, aten.mul, aten.div, aten.mean, aten.neg]
triton_per_fused_add_div_mean_mul_neg_1.run(buf3, buf0, buf1, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch._C
import torch.serialization
class SoftDiceLossSquared(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0):
"""
squares the terms in the denominator as proposed by Milletari et al.
"""
super(SoftDiceLossSquared, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
shp_y = y.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
with torch.no_grad():
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(x.shape, y.shape)]):
y_onehot = y
else:
y = y.long()
y_onehot = torch.zeros(shp_x)
if x.device.type == 'cuda':
y_onehot = y_onehot
y_onehot.scatter_(1, y, 1).float()
intersect = x * y_onehot
denominator = x ** 2 + y_onehot ** 2
intersect = intersect.sum(axes, False) + self.smooth
denominator = denominator.sum(axes, False) + self.smooth
dc = 2 * intersect / denominator
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch._C
import torch.serialization
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tmp0 * tmp0
tmp8 = tmp1 * tmp1
tmp9 = tmp7 + tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
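# Note: the reduction above produces, for each (batch, class) pair, the Dice
# intersection sum(x * y) in out_ptr0 and the squared denominator
# sum(x**2 + y**2) in out_ptr1 over the 16 spatial positions (dims [2, 3]).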
@triton.jit
def triton_per_fused_add_div_mean_mul_neg_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp6 = tmp5 + tmp1
tmp7 = tmp4 / tmp6
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.sum(tmp8, 1)[:, None]
tmp11 = 16.0
tmp12 = tmp10 / tmp11
tmp13 = -tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None)
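# Note: the kernel above adds the smooth term (1.0) to both reductions, forms
# 2 * (I + 1) / (D + 1), averages the 16 (batch, class) entries and negates,
# yielding the scalar loss written into in_out_ptr0.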
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_pow_sum_0[grid(16)](arg0_1, arg1_1, buf0,
buf1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused_add_div_mean_mul_neg_1[grid(1)](buf3, buf0, buf1,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf3,
class SoftDiceLossSquaredNew(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0):
"""
squares the terms in the denominator as proposed by Milletari et al.
"""
super(SoftDiceLossSquaredNew, self).__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| dkswxd/Swin-Transformer-Semantic-Segmentation | SoftDiceLossSquared | false | 1,851 | [
"Apache-2.0"
] | 0 | 6af19736e5492a01d8952d4ee86a6d59b21c2ae1 | https://github.com/dkswxd/Swin-Transformer-Semantic-Segmentation/tree/6af19736e5492a01d8952d4ee86a6d59b21c2ae1 | import torch
import torch.nn as nn
import torch._C
import torch.serialization
class Model(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0):
"""
squares the terms in the denominator as proposed by Milletari et al.
"""
super().__init__()
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
shp_y = y.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
with torch.no_grad():
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(x.shape, y.shape)]):
y_onehot = y
else:
y = y.long()
y_onehot = torch.zeros(shp_x)
if x.device.type == 'cuda':
y_onehot = y_onehot
y_onehot.scatter_(1, y, 1).float()
intersect = x * y_onehot
denominator = x ** 2 + y_onehot ** 2
intersect = intersect.sum(axes, False) + self.smooth
denominator = denominator.sum(axes, False) + self.smooth
dc = 2 * intersect / denominator
if not self.do_bg:
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
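# Worked form of the loss above (assuming the default smooth=1.0 and a target
# already matching x's (B, C, H, W) shape):
#   dc[b, c] = 2 * (sum_hw x*y + 1) / (sum_hw (x**2 + y**2) + 1)
# and the returned value is -dc.mean().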
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|