Dataset schema (column types and value statistics):

| Column | Type | Value stats |
|---|---|---|
| entry_point | string | lengths 1-65 |
| original_triton_code | string | lengths 4.5k-619k |
| python_code | string | lengths 208-60.9k |
| triton_code | string | lengths 1.15k-275k |
| repo_name | string | lengths 7-115 |
| module_name | string | lengths 1-65 |
| synthetic | bool | 1 class |
| uuid | int64 | 0-18.5k |
| licenses | sequence | lengths 1-6 |
| stars | int64 | 0-19.8k |
| sha | string | length 40 |
| repo_link | string | lengths 72-180 |
| pytorch_code | string | lengths 200-4.05k |
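Each row pairs an original PyTorch `nn.Module` (`pytorch_code`) with the Triton kernels that PyTorch Inductor generated for it (`original_triton_code`, plus a cleaned-up `triton_code` variant), along with repository provenance. A minimal loading sketch with the Hugging Face `datasets` library follows; the Hub path is a hypothetical placeholder, as the card does not name one.

```python
# Minimal loading sketch; "your-org/inductor-triton-pairs" is a
# hypothetical placeholder for the actual dataset identifier.
from datasets import load_dataset

ds = load_dataset("your-org/inductor-triton-pairs", split="train")
row = ds[0]
print(row["entry_point"])   # e.g. "TVLoss"
print(row["pytorch_code"])  # original nn.Module source
print(row["triton_code"])   # Inductor-generated Triton kernels
```

Sample rows (flattened; long code cells span multiple lines):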
entry_point | original_triton_code | python_code | triton_code | repo_name | module_name | synthetic | uuid | licenses | stars | sha | repo_link | pytorch_code
---|---|---|---|---|---|---|---|---|---|---|---|---
TVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ap/capgkvksys2oryteu5tgoxocuctkimjqhc5ur574b57sgrqpa62k.py
# Topologically Sorted Source Nodes: [x_diff, pow_1, y_diff, pow_2, add, diff, mean, sqrt, mean_1], Original ATen: [aten.sub, aten.pow, aten.add, aten.mean, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# diff => add_1
# mean => mean
# mean_1 => mean_1
# pow_1 => pow_1
# pow_2 => pow_2
# sqrt => sqrt
# x_diff => sub
# y_diff => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %slice_4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %slice_8), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-08), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [1]), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = (rindex // 4) % 4
r2 = (rindex // 16)
r3 = rindex
tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp1 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp4 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp10 = tl.load(in_ptr0 + (16 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp11 = tl.load(in_ptr0 + (16 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp14 = tl.load(in_ptr0 + (16 + (4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp20 = tl.load(in_ptr0 + (32 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp21 = tl.load(in_ptr0 + (32 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp24 = tl.load(in_ptr0 + (32 + (4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp30 = tl.load(in_ptr0 + (48 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp31 = tl.load(in_ptr0 + (48 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp34 = tl.load(in_ptr0 + (48 + (4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp15 = tmp14 - tmp11
tmp16 = tmp15 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp17 + tmp8
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp25 = tmp24 - tmp21
tmp26 = tmp25 * tmp25
tmp27 = tmp23 + tmp26
tmp28 = tmp27 + tmp8
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = tmp37 + tmp8
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tmp42 = libdevice.sqrt(tmp41)
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = tl.sum(tmp43, 1)[:, None]
tmp46 = 64.0
tmp47 = tmp45 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp47, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_diff, pow_1, y_diff, pow_2, add, diff, mean, sqrt, mean_1], Original ATen: [aten.sub, aten.pow, aten.add, aten.mean, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0.run(buf2, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
class TVLoss(nn.Module):
def forward(self, input):
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
diff = x_diff ** 2 + y_diff ** 2 + 1e-08
return diff.mean(dim=1).sqrt().mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4 % 4
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 64 * r2 +
(3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 64 * r2 +
(3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp4 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 + r1 <
3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp10 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp11 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp14 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp20 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp21 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp24 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp30 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp31 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp34 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp15 = tmp14 - tmp11
tmp16 = tmp15 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp17 + tmp8
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp25 = tmp24 - tmp21
tmp26 = tmp25 * tmp25
tmp27 = tmp23 + tmp26
tmp28 = tmp27 + tmp8
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = tmp37 + tmp8
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tmp42 = libdevice.sqrt(tmp41)
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = tl.sum(tmp43, 1)[:, None]
tmp46 = 64.0
tmp47 = tmp45 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0[grid(1)](buf2, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| aradalienzzzz/vqgan-clip-app | TVLoss | false | 6,227 | [
"MIT"
] | 1 | f5a16d792da5ad0ede855254fe393f6b990c8e1d | https://github.com/aradalienzzzz/vqgan-clip-app/tree/f5a16d792da5ad0ede855254fe393f6b990c8e1d | import torch
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
def forward(self, input):
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
diff = x_diff ** 2 + y_diff ** 2 + 1e-08
return diff.mean(dim=1).sqrt().mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
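The generated `call` wrapper asserts a fixed `(4, 4, 4, 4)` contiguous CUDA input, so `TVLossNew` is a drop-in replacement only for that signature. A quick equivalence check between the eager and compiled modules, as a sketch assuming a CUDA device and both class definitions from this row in scope:

```python
# Equivalence sketch: eager TVLoss vs. the Triton-backed TVLossNew on the
# one input signature the compiled wrapper supports.
import torch

x = torch.rand(4, 4, 4, 4, device="cuda")
eager = TVLoss()(x)        # reference result
compiled = TVLossNew()(x)  # runs triton_per_fused_add_mean_pow_sqrt_sub_0
torch.testing.assert_close(compiled, eager, rtol=1e-4, atol=1e-5)
```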
StdConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/sb/csb6dntsb7xdmmuaslmjpxol3sfazdmjhrcbsj5qmbxg6p6o3anh.py
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# sqrt => sqrt
# sub => sub
# var_mean => var_mean
# w => div
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_0 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (64*x0)), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %div, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_sqrt_sub_var_mean_0.run(buf3, primals_1, buf4, 4, 64, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf6, primals_2, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf6, primals_1, primals_3, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_0(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 64.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 64 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 1, 1, 1), (1, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_sqrt_sub_var_mean_0[grid(4)](buf3,
primals_1, buf4, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = extern_kernels.convolution(primals_3, buf4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_1[grid(16)](buf6, primals_2, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return buf6, primals_1, primals_3, buf3, buf4
class StdConv2dNew(nn.Conv2d):
def forward(self, input_0):
primals_1 = self.weight
primals_2 = self.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| aris-mukherjee/TransUNet-modified | StdConv2d | false | 6,228 | [
"Apache-2.0"
] | 1 | 185307b677fd6ee05604213c90e14e028fab476a | https://github.com/aris-mukherjee/TransUNet-modified/tree/185307b677fd6ee05604213c90e14e028fab476a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
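`StdConv2d` applies weight standardization: each filter is normalized to zero mean and unit variance over its `(in_channels, kH, kW)` extent before the convolution, which is what the fused `triton_per_fused_add_div_sqrt_sub_var_mean_0` kernel computes. A device-agnostic sketch of the transform on its own, assuming the `StdConv2d` class from this row is in scope:

```python
# Weight-standardization sketch: reproduce the normalization that the
# fused Triton kernel performs on the (4, 4, 4, 4) weight tensor.
import torch

conv = StdConv2d(in_channels=4, out_channels=4, kernel_size=4)
w = conv.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w_std = (w - m) / torch.sqrt(v + 1e-05)
print(w_std.mean(dim=[1, 2, 3]))                 # ~0 per output channel
print(w_std.std(dim=[1, 2, 3], unbiased=False))  # ~1 per output channel
```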
LearnedPositionalEmbedding1D | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/i5/ci5qztmjm5trfof5nnr4q6zfdgt5kz3i6kpes2s3qrkgziadh2ag.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn
import torch.autograd
class LearnedPositionalEmbedding1D(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs."""
def __init__(self, seq_len, dim):
super().__init__()
self.pos_embedding = nn.Parameter(torch.zeros(1, seq_len, dim))
def forward(self, x):
"""Input has shape `(batch_size, seq_len, emb_dim)`"""
return x + self.pos_embedding
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'seq_len': 4, 'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_2, primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0,
class LearnedPositionalEmbedding1DNew(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs."""
def __init__(self, seq_len, dim):
super().__init__()
self.pos_embedding = nn.Parameter(torch.zeros(1, seq_len, dim))
def forward(self, input_0):
primals_1 = self.pos_embedding
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| arkel23/yuwu | LearnedPositionalEmbedding1D | false | 6,229 | [
"MIT"
] | 1 | 4dcf0e18693e09a947569ddcc7cb3ff00c7c674a | https://github.com/arkel23/yuwu/tree/4dcf0e18693e09a947569ddcc7cb3ff00c7c674a | import torch
from torch import nn
import torch.nn
import torch.autograd
class Model(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs."""
def __init__(self, seq_len, dim):
super().__init__()
self.pos_embedding = nn.Parameter(torch.zeros(1, seq_len, dim))
def forward(self, x):
"""Input has shape `(batch_size, seq_len, emb_dim)`"""
return x + self.pos_embedding
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
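The positional embedding has shape `(1, seq_len, dim)`, so the addition broadcasts over any leading batch dimensions; that is why the `(4, 4, 4, 4)` test input is accepted even though the docstring describes a 3-D input. A short sketch, assuming the `LearnedPositionalEmbedding1D` class above is in scope:

```python
# Broadcasting sketch: the (1, seq_len, dim) parameter adds to any input
# whose trailing two dimensions are (seq_len, dim).
import torch

emb = LearnedPositionalEmbedding1D(seq_len=4, dim=4)
x3 = torch.rand(2, 4, 4)     # (batch, seq_len, dim)
x4 = torch.rand(2, 3, 4, 4)  # an extra leading dim broadcasts too
assert emb(x3).shape == x3.shape
assert emb(x4).shape == x4.shape
```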
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wv/cwvsxo4q6wyoxpozsubbimmg6xvl34ow44hy6yl5mwa23uuy77sa.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [xw], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
del buf0
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
return (buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x, adj, D=None):
if x.dim() == 3:
xw = torch.matmul(x, self.weight)
output = torch.bmm(adj, xw)
elif x.dim() == 2:
xw = torch.mm(x, self.weight)
output = torch.spmm(adj, xw)
if D is not None:
output = output * 1.0 / D
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, dropout=0.0):
super(BasicBlock, self).__init__()
self.gc = GraphConv(inplanes, planes)
self.relu = nn.ReLU(inplace=True)
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
else:
self.dropout = None
def forward(self, x, adj, D=None):
x = self.gc(x, adj, D)
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
return buf2, buf3, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0)
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConv, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x, adj, D=None):
if x.dim() == 3:
xw = torch.matmul(x, self.weight)
output = torch.bmm(adj, xw)
elif x.dim() == 2:
xw = torch.mm(x, self.weight)
output = torch.spmm(adj, xw)
if D is not None:
output = output * 1.0 / D
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class BasicBlockNew(nn.Module):
def __init__(self, inplanes, planes, dropout=0.0):
super(BasicBlockNew, self).__init__()
self.gc = GraphConv(inplanes, planes)
self.relu = nn.ReLU(inplace=True)
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
else:
self.dropout = None
def forward(self, input_0, input_1):
primals_1 = self.gc.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| ardihikaru/learn-to-cluster | BasicBlock | false | 6,230 | [
"MIT"
] | 1 | d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca | https://github.com/ardihikaru/learn-to-cluster/tree/d7a5ea0946f7b402f8878bfd608bf3e0dc9a26ca | import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class GraphConv(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x, adj, D=None):
if x.dim() == 3:
xw = torch.matmul(x, self.weight)
output = torch.bmm(adj, xw)
elif x.dim() == 2:
xw = torch.mm(x, self.weight)
output = torch.spmm(adj, xw)
if D is not None:
output = output * 1.0 / D
return output
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class Model(nn.Module):
def __init__(self, inplanes, planes, dropout=0.0):
super().__init__()
self.gc = GraphConv(inplanes, planes)
self.relu = nn.ReLU(inplace=True)
if dropout > 0:
self.dropout = nn.Dropout2d(p=dropout)
else:
self.dropout = None
def forward(self, x, adj, D=None):
x = self.gc(x, adj, D)
x = self.relu(x)
if self.dropout is not None:
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
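For 2-D inputs `GraphConv` aggregates with `torch.spmm`, so the adjacency may be a sparse tensor; the compiled graph above instead lowers both products to dense `mm` calls for the fixed 4x4 case. A usage sketch, assuming the `GraphConv`/`BasicBlock` classes from this row are in scope:

```python
# Usage sketch: dense node features with a sparse identity adjacency.
import torch

block = BasicBlock(inplanes=4, planes=4)
x = torch.rand(4, 4)            # (num_nodes, in_features)
adj = torch.eye(4).to_sparse()  # aggregation matrix as sparse COO
out = block(x, adj)             # relu(spmm(adj, x @ W))
print(out.shape)                # torch.Size([4, 4])
```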
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/t2/ct2u7hbdimmscpyuhwui3vspbro3o7jotkwbveevem3fmwohq7po.py
# Topologically Sorted Source Nodes: [pow_1, nu2, abs_1, add, rsqrt, x, mul_1, add_1, max_1, out], Original ATen: [aten.pow, aten.mean, aten.abs, aten.add, aten.rsqrt, aten.mul, aten.maximum, aten.relu]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add => add
# add_1 => add_1
# max_1 => maximum
# mul_1 => mul_1
# nu2 => mean
# out => relu
# pow_1 => pow_1
# rsqrt => rsqrt
# x => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2, 3], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_3,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %abs_1), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %mul), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%add_1, %primals_6), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%maximum,), kwargs = {})
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0 = async_compile.triton('triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tl.full([1, 1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp20, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bh/cbhkt6fcg5sbuhfq7ezlrsawx6ui6n4uuzlvlvgvcrodiji6dr5e.py
# Topologically Sorted Source Nodes: [pow_2, nu2_1, abs_2, add_2, rsqrt_1, x_1, mul_3, add_3, out_1, out_2, out_3], Original ATen: [aten.pow, aten.mean, aten.abs, aten.add, aten.rsqrt, aten.mul, aten.maximum, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# abs_2 => abs_2
# add_2 => add_2
# add_3 => add_3
# mul_3 => mul_3
# nu2_1 => mean_1
# out_1 => maximum_1
# out_2 => add_4
# out_3 => relu_1
# pow_2 => pow_2
# rsqrt_1 => rsqrt_1
# x_1 => mul_2
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%convolution_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_2, [2, 3], True), kwargs = {})
# %abs_2 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_8,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, %abs_2), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_9, %mul_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_10), kwargs = {})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%add_3, %primals_11), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%maximum_1, %primals_2), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1 = async_compile.triton('triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (r1 + (16*x0)), xmask, other=0.0)
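# Hedged editor's note: the arithmetic below spells out the fused
# FilterResponseNorm + residual + ReLU from the source module: nu2 =
# mean(x^2) over the 16 spatial elements, rsqrt(nu2 + |eps|), gamma * x_hat
# + beta, max(.., tau), add of the block input, ReLU, and the <= 0 mask
# kept for the backward pass.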
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = tmp18 + tmp19
tmp21 = tl.full([1, 1], 0, tl.int32)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp23 = 0.0
tmp24 = tmp22 <= tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp22, xmask)
tl.store(out_ptr1 + (r1 + (16*x0)), tmp24, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, nu2, abs_1, add, rsqrt, x, mul_1, add_1, max_1, out], Original ATen: [aten.pow, aten.mean, aten.abs, aten.add, aten.rsqrt, aten.mul, aten.maximum, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0.run(buf2, buf0, primals_3, primals_4, primals_5, primals_6, buf3, 16, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf5 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [pow_2, nu2_1, abs_2, add_2, rsqrt_1, x_1, mul_3, add_3, out_1, out_2, out_3], Original ATen: [aten.pow, aten.mean, aten.abs, aten.add, aten.rsqrt, aten.mul, aten.maximum, aten.relu, aten.threshold_backward]
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1.run(buf6, buf4, primals_8, primals_9, primals_10, primals_11, primals_2, buf7, buf8, 16, 16, grid=grid(16), stream=stream0)
del primals_8
return (buf7, primals_1, primals_2, primals_4, primals_5, primals_6, primals_7, primals_9, primals_10, primals_11, buf0, buf2, buf3, buf4, buf6, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool indicating whether eps is learnable.
"""
assert ndim in [3, 4, 5], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNd, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
class FilterResponseNorm2d(FilterResponseNormNd):
def __init__(self, num_features, eps=1e-06, learnable_eps=False):
super(FilterResponseNorm2d, self).__init__(4, num_features, eps=eps,
learnable_eps=learnable_eps)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn1 = FilterResponseNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = FilterResponseNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
FilterResponseNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
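# Hedged usage sketch (editor's addition, not part of the original repo):
# exercises BasicBlock end to end. The batch and spatial sizes below are
# illustrative assumptions, not values taken from the source.
def _demo_basic_block():
    block = BasicBlock(in_planes=4, planes=4)
    x = torch.rand(2, 4, 8, 8)
    out = block(x)
    # FRN + residual + ReLU preserve the shape and yield non-negative values.
    assert out.shape == (2, 4, 8, 8)
    assert (out >= 0).all()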
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp19 = tl.full([1, 1], 0, tl.int32)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp20, xmask)
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr5 + (r1 + 16 * x0), xmask, other=0.0)
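# Hedged editor's note: same fused FRN math as the eager module -- spatial
# mean of x^2, rsqrt(nu2 + |eps|), affine, max with tau, residual add,
# ReLU, plus the <= 0 mask saved for threshold_backward.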
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = tmp18 + tmp19
tmp21 = tl.full([1, 1], 0, tl.int32)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp23 = 0.0
tmp24 = tmp22 <= tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp22, xmask)
tl.store(out_ptr1 + (r1 + 16 * x0), tmp24, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_0[grid(16)](
buf2, buf0, primals_3, primals_4, primals_5, primals_6, buf3,
16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf5, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_per_fused_abs_add_maximum_mean_mul_pow_relu_rsqrt_threshold_backward_1[
grid(16)](buf6, buf4, primals_8, primals_9, primals_10,
primals_11, primals_2, buf7, buf8, 16, 16, XBLOCK=8, num_warps=
2, num_stages=1)
del primals_8
return (buf7, primals_1, primals_2, primals_4, primals_5, primals_6,
primals_7, primals_9, primals_10, primals_11, buf0, buf2, buf3,
buf4, buf6, buf8)
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool indicating whether eps is learnable.
"""
assert ndim in [3, 4, 5], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNd, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
class FilterResponseNorm2d(FilterResponseNormNd):
def __init__(self, num_features, eps=1e-06, learnable_eps=False):
super(FilterResponseNorm2d, self).__init__(4, num_features, eps=eps,
learnable_eps=learnable_eps)
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn1 = FilterResponseNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = FilterResponseNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
FilterResponseNorm2d(self.expansion * planes))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.bn1.eps
primals_4 = self.bn1.gamma
primals_5 = self.bn1.beta
primals_6 = self.bn1.tau
primals_7 = self.conv2.weight
primals_8 = self.bn2.eps
primals_9 = self.bn2.gamma
primals_10 = self.bn2.beta
primals_11 = self.bn2.tau
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
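# Hedged sketch (editor's addition): drives the Inductor-lowered block on
# CUDA. The 4x4x4x4 contiguous input is required by the assert_size_stride
# guards in call(); availability of a CUDA device is an assumption.
def _demo_basic_block_new():
    if not torch.cuda.is_available():
        return
    block = BasicBlockNew(in_planes=4, planes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = block(x)
    assert out.shape == (4, 4, 4, 4)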
| aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning | BasicBlock | false | 6231 | [
"MIT"
] | 1 | 78aec81919bf95ed4677d0e0a4ebbbe3be455742 | https://github.com/aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning/tree/78aec81919bf95ed4677d0e0a4ebbbe3be455742 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
assert ndim in [3, 4, 5], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super().__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
class FilterResponseNorm2d(FilterResponseNormNd):
def __init__(self, num_features, eps=1e-06, learnable_eps=False):
super().__init__(4, num_features, eps=eps,
learnable_eps=learnable_eps)
class Model(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn1 = FilterResponseNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = FilterResponseNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
stride=stride, bias=False),
FilterResponseNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Embedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ys/cys6fbrc4ncrm6vnp4nursuec7gsli6t4pfg3v4fvdqv5fn6sq6g.py
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
# Source node to ATen node mapping:
# getitem => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%primals_1, [%primals_2]), kwargs = {})
triton_poi_fused_index_0 = async_compile.triton('triton_poi_fused_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
stream0 = get_raw_stream(0)
triton_poi_fused_index_0.run(primals_2, primals_1, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.int64)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch as t
import torch.nn as nn
import torch.utils.data
class Embedding(nn.Module):
"""
Reimplementation of torch.nn.Embedding (see the docs for that class).
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
_weight=None):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
if _weight is None:
self.weight = nn.Parameter(t.randn([self.num_embeddings,
self.embedding_dim]) / np.sqrt(self.num_embeddings))
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = nn.Parameter(_weight)
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, x):
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
return self.weight[x]
def get_inputs():
return [torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [[], {'num_embeddings': 4, 'embedding_dim': 4}]
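# Hedged usage sketch (editor's addition): demonstrates the padding_idx
# behaviour documented above -- the padded row is re-zeroed on every
# forward call. The index values are illustrative assumptions.
def _demo_embedding_padding():
    emb = Embedding(num_embeddings=4, embedding_dim=4, padding_idx=0)
    idx = torch.tensor([0, 2, 0, 3])
    out = emb(idx)
    assert out.shape == (4, 4)
    # Rows gathered at the padding index come back as zeros.
    assert torch.all(out[0] == 0) and torch.all(out[2] == 0)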
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch as t
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
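# Hedged editor's note: negative indices wrap by adding num_embeddings
# (4 here), matching Python-style indexing; a device-side assert then
# bounds-checks the gather from the weight matrix.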
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(16)](primals_2, primals_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, primals_2
class EmbeddingNew(nn.Module):
"""
Reimplementation of torch.nn.Embedding (see the docs for that class).
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
_weight=None):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
if _weight is None:
self.weight = nn.Parameter(t.randn([self.num_embeddings,
self.embedding_dim]) / np.sqrt(self.num_embeddings))
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = nn.Parameter(_weight)
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, input_0):
primals_1 = self.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
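# Hedged sketch (editor's addition): the fused gather kernel asserts a
# (4, 4) float32 weight and 4 int64 indices, so those shapes are used
# here; availability of a CUDA device is an assumption.
def _demo_embedding_new():
    if not torch.cuda.is_available():
        return
    emb = EmbeddingNew(num_embeddings=4, embedding_dim=4).cuda()
    idx = torch.tensor([0, 1, 2, 3], device='cuda')
    assert emb(idx).shape == (4, 4)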
| arjunsesh/lrr-neurips | Embedding | false | 6232 | [
"MIT"
] | 1 | d78106daec1e729b02a0452f74a37bf004ed243c | https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c | import torch
import numpy as np
import torch as t
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
Reimplementation of torch.nn.Embedding (see the docs for that class).
"""
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
_weight=None):
super().__init__()
self.num_embeddings = num_embeddings
self.embedding_dim = embedding_dim
if padding_idx is not None:
if padding_idx > 0:
assert padding_idx < self.num_embeddings, 'Padding_idx must be within num_embeddings'
elif padding_idx < 0:
assert padding_idx >= -self.num_embeddings, 'Padding_idx must be within num_embeddings'
padding_idx = self.num_embeddings + padding_idx
self.padding_idx = padding_idx
if _weight is None:
self.weight = nn.Parameter(t.randn([self.num_embeddings,
self.embedding_dim]) / np.sqrt(self.num_embeddings))
else:
assert list(_weight.shape) == [num_embeddings, embedding_dim], \
'Shape of weight does not match num_embeddings and embedding_dim'
self.weight = nn.Parameter(_weight)
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
def forward(self, x):
if self.padding_idx is not None:
with t.no_grad():
self.weight[self.padding_idx].fill_(0)
return self.weight[x]
def get_inputs():
return [torch.ones([4], dtype=torch.int64)]
def get_init_inputs():
return [4, 4]
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/45/c45kseiahg2pyc2i3fnpuo4uo4dk75vbormylc7ceqkbk6komm3x.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%repeat, %primals_1], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = (xindex // 32)
x3 = (xindex // 8)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x3) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# energy => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gd/cgdtd7uw2iemby2kfb22fx3vkhdbrpyx2y2l6nq45fmox3ad7stv.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%squeeze_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qs/cqsyda2m63ct5ijcfgcipyyfn273chi5d3kmpjuf5asa7h4wdpdv.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 128, grid=grid(128), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [energy], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_4, 64, grid=grid(64), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
del buf4
return (buf5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf5, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class Attention(nn.Module):
"""Attention Layer merging Encoder and Decoder
Attributes:
hidden_size (int):
The number of features in the hidden state h
Reference:
* https://github.com/bentrevett/pytorch-seq2seq
"""
def __init__(self, hidden_size):
super().__init__()
self.attn = nn.Linear(hidden_size + hidden_size, hidden_size)
self.v = nn.Linear(hidden_size, 1, bias=False)
def forward(self, hidden, encoder_outputs):
"""
Args:
hidden: previous hidden state of the decoder [batch_size, hidden_size]
encoder_outputs: outputs of encoder
[batch_size, sample_size, num_directions*hidden_size]
"""
sample_size = encoder_outputs.shape[1]
hidden = hidden.squeeze(0)
hidden = hidden.unsqueeze(1).repeat(1, sample_size, 1)
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs),
dim=2)))
attention = self.v(energy).squeeze(2)
return F.softmax(attention, dim=1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
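# Hedged usage sketch (editor's addition): attention weights are a softmax
# over the sample dimension, so each row sums to one. Shapes follow
# get_inputs() above; the closeness check is an illustrative assumption.
def _demo_attention():
    attn = Attention(hidden_size=4)
    hidden = torch.rand(4, 4)
    encoder_outputs = torch.rand(4, 4, 4)
    weights = attn(hidden, encoder_outputs)
    assert weights.shape == (4, 4)
    assert torch.allclose(weights.sum(dim=1), torch.ones(4))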
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x2 = xindex // 32
x3 = xindex // 8
x4 = xindex
tmp0 = x0
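# Hedged editor's note: selects between the repeated hidden state (first
# 4 features) and the encoder outputs (last 4), writing out the dim-2
# concatenation from the source module.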
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
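# Hedged editor's note: numerically stable softmax, step one -- subtract
# the row max across the 4 columns before exponentiating.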
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
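# Hedged editor's note: softmax, step two -- divide each exponential by
# the row sum accumulated from the same 4 columns.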
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_2, primals_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 1), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4), (4, 1), 0)
del buf3
triton_poi_fused__softmax_3[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
return buf5, reinterpret_tensor(buf0, (16, 8), (8, 1), 0), buf2, buf5, primals_5
class AttentionNew(nn.Module):
"""Attention Layer merging Encoder and Decoder
Attributes:
hidden_size (int):
The number of features in the hidden state h
Reference:
* https://github.com/bentrevett/pytorch-seq2seq
"""
def __init__(self, hidden_size):
super().__init__()
self.attn = nn.Linear(hidden_size + hidden_size, hidden_size)
self.v = nn.Linear(hidden_size, 1, bias=False)
def forward(self, input_0, input_1):
primals_3 = self.attn.weight
primals_4 = self.attn.bias
primals_5 = self.v.weight
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
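# Hedged sketch (editor's addition): exercises the compiled attention
# path; the (4, 4) hidden and (4, 4, 4) encoder shapes come from the
# call() guards, and a CUDA device is assumed.
def _demo_attention_new():
    if not torch.cuda.is_available():
        return
    attn = AttentionNew(hidden_size=4).cuda()
    hidden = torch.rand(4, 4, device='cuda')
    encoder_outputs = torch.rand(4, 4, 4, device='cuda')
    weights = attn(hidden, encoder_outputs)
    assert torch.allclose(weights.sum(dim=1), torch.ones(4, device='cuda'))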
| appleparan/mise.py | Attention | false | 6233 | [
"MIT"
] | 1 | a77ea51be37a739928600c66d168d69b78bc0c4b | https://github.com/appleparan/mise.py/tree/a77ea51be37a739928600c66d168d69b78bc0c4b | import torch
import torch.nn.functional as F
from torch import nn
class Model(nn.Module):
"""Attention Layer merging Encoder and Decoder
Attributes:
hidden_size (int):
The number of features in the hidden state h
Reference:
* https://github.com/bentrevett/pytorch-seq2seq
"""
def __init__(self, hidden_size):
super().__init__()
self.attn = nn.Linear(hidden_size + hidden_size, hidden_size)
self.v = nn.Linear(hidden_size, 1, bias=False)
def forward(self, hidden, encoder_outputs):
"""
Args:
hidden: previous hidden state of the decoder [batch_size, hidden_size]
encoder_outputs: outputs of encoder
[batch_size, sample_size, num_directions*hidden_size]
"""
sample_size = encoder_outputs.shape[1]
hidden = hidden.squeeze(0)
hidden = hidden.unsqueeze(1).repeat(1, sample_size, 1)
energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs),
dim=2)))
attention = self.v(energy).squeeze(2)
return F.softmax(attention, dim=1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
FilterResponseNormNd | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rk/crkf62gn566poghbdxlzmg7dulwy2vx6hyknt4yzw6bx5roqbiom.py
# Topologically Sorted Source Nodes: [pow_1, nu2, abs_1, add, rsqrt, x, mul_1, add_1, max_1], Original ATen: [aten.pow, aten.mean, aten.abs, aten.add, aten.rsqrt, aten.mul, aten.maximum]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add => add
# add_1 => add_1
# max_1 => maximum
# mul_1 => mul_1
# nu2 => mean
# pow_1 => pow_1
# rsqrt => rsqrt
# x => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [2, 3], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%primals_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %abs_1), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %mul), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%add_1, %primals_5), kwargs = {})
triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
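    # Per program: x0 indexes one (batch, channel) pair, r1 the 16 spatial positions,
    # and x2 the channel used to broadcast eps (in_ptr1), gamma (in_ptr2), beta
    # (in_ptr3) and tau (in_ptr4). The reduction below computes nu2 = mean(x^2),
    # then rsqrt(nu2 + |eps|), gamma * x_hat + beta, and max(., tau) in one pass.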
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, nu2, abs_1, add, rsqrt, x, mul_1, add_1, max_1], Original ATen: [aten.pow, aten.mean, aten.abs, aten.add, aten.rsqrt, aten.mul, aten.maximum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0.run(buf1, primals_1, primals_2, primals_3, primals_4, primals_5, buf2, 16, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, primals_4, primals_5, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data.distributed
class FilterResponseNormNd(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
        assert ndim in [3, 4, 5], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNd, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ndim': 4, 'num_features': 4}]
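# --- Hedged usage sketch (added for illustration; not part of the original repo) ---
# Shows the FRN + TLU computation end to end and checks it against the closed-form
# expression from forward(). The helper name `_frn_demo` is ours, not the author's.
def _frn_demo():
    frn = FilterResponseNormNd(ndim=4, num_features=4)
    x = torch.rand(4, 4, 4, 4)
    nu2 = x.pow(2).mean(dim=(2, 3), keepdim=True)  # per-(batch, channel) mean of squares
    ref = torch.max(frn.gamma * (x * torch.rsqrt(nu2 + frn.eps.abs())) + frn.beta, frn.tau)
    assert torch.allclose(frn(x), ref)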
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 16.0
tmp7 = tmp5 / tmp6
tmp9 = tl_math.abs(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tmp13 = tmp0 * tmp11
tmp14 = tmp12 * tmp13
tmp16 = tmp14 + tmp15
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp18, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_maximum_mean_mul_pow_rsqrt_0[grid(16)](buf1,
primals_1, primals_2, primals_3, primals_4, primals_5, buf2, 16,
16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_2
return buf2, primals_1, primals_3, primals_4, primals_5, buf1
class FilterResponseNormNdNew(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
        assert ndim in [3, 4, 5], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super(FilterResponseNormNdNew, self).__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
def forward(self, input_0):
primals_2 = self.eps
primals_3 = self.gamma
primals_4 = self.beta
primals_5 = self.tau
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
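# Hedged equivalence check (illustration only; assumes a CUDA device is available):
# the Triton-backed module should reproduce the eager FRN formula, since call()
# fuses exactly the ops listed in the graph fragment above. `_check_fused_frn` is ours.
def _check_fused_frn():
    m = FilterResponseNormNdNew(ndim=4, num_features=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    nu2 = x.pow(2).mean(dim=(2, 3), keepdim=True)
    ref = torch.max(m.gamma * (x * torch.rsqrt(nu2 + m.eps.abs())) + m.beta, m.tau)
    assert torch.allclose(m(x), ref, atol=1e-6)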
| aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning | FilterResponseNormNd | false | 6,234 | ["MIT"] | 1 | 78aec81919bf95ed4677d0e0a4ebbbe3be455742 | https://github.com/aouedions11/SSFL-Benchmarking-Semi-supervised-Federated-Learning/tree/78aec81919bf95ed4677d0e0a4ebbbe3be455742 | import torch
import torch.nn as nn
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, ndim, num_features, eps=1e-06, learnable_eps=False):
"""
Input Variables:
----------------
ndim: An integer indicating the number of dimensions of the expected input tensor.
num_features: An integer indicating the number of input feature dimensions.
eps: A scalar constant or learnable variable.
learnable_eps: A bool value indicating whether the eps is learnable.
"""
        assert ndim in [3, 4, 5], 'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
super().__init__()
shape = (1, num_features) + (1,) * (ndim - 2)
self.eps = nn.Parameter(torch.ones(*shape) * eps)
if not learnable_eps:
self.eps.requires_grad_(False)
self.gamma = nn.Parameter(torch.Tensor(*shape))
self.beta = nn.Parameter(torch.Tensor(*shape))
self.tau = nn.Parameter(torch.Tensor(*shape))
self.reset_parameters()
def forward(self, x):
avg_dims = tuple(range(2, x.dim()))
nu2 = torch.pow(x, 2).mean(dim=avg_dims, keepdim=True)
x = x * torch.rsqrt(nu2 + torch.abs(self.eps))
return torch.max(self.gamma * x + self.beta, self.tau)
def reset_parameters(self):
nn.init.ones_(self.gamma)
nn.init.zeros_(self.beta)
nn.init.zeros_(self.tau)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PrimaryCapsules | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jo/cjoo56yo7awtt6oev6cvdfwedqercpy7m6xu45f3pq6md6tems26.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hc/chc6q7iii23tc2cpn33tbxkau5cudjchdwtlea2vukooao45wnjn.py
# Topologically Sorted Source Nodes: [pow_1, norm, add, truediv, mul, sqrt, add_1, x_2], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# norm => sum_1
# pow_1 => pow_1
# sqrt => sqrt
# truediv => div
# x_2 => div_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {})
# %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_1 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (x2), xmask)
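    # The four strided loads fetch the 4 components of the capsule vector that x1
    # indexes, so the squared norm is formed without a reduction loop; tmp14 reloads
    # the single element this thread rescales by norm/(norm+1)/(sqrt(norm)+1e-8).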
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (16, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_3, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 50176, grid=grid(50176), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 3136, 4), (12544, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, norm, add, truediv, mul, sqrt, add_1, x_2], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt]
triton_poi_fused_add_div_mul_pow_sqrt_sum_1.run(buf1, buf2, 50176, grid=grid(50176), stream=stream0)
return (buf2, primals_1, primals_2, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4, 9, 9), (324, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class PrimaryCapsules(nn.Module):
def __init__(self, in_features, capsules_num, capsules_dim):
super(PrimaryCapsules, self).__init__()
self.in_features = in_features
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.conv = nn.Conv2d(in_features, capsules_num * capsules_dim,
kernel_size=9, stride=2)
def forward(self, x):
batch_size = x.shape[0]
x = self.conv(x)
"""
Since all capsules use the same convolution operations, just do once and reshape.
"""
x = x.view(batch_size, -1, self.capsules_dim)
x = squash(x)
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_features': 4, 'capsules_num': 4, 'capsules_dim': 4}]
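# --- Hedged numerical sketch (added for illustration; not part of the original repo) ---
# squash() rescales each capsule vector to length ||v||^2 / (1 + ||v||^2), keeping the
# output norm in [0, 1): short vectors shrink toward 0, long ones saturate toward 1.
def _squash_demo():
    v = torch.randn(8, 4) * 5.0
    n2 = (v ** 2).sum(dim=-1)
    out_norm = squash(v).norm(dim=-1)
    assert torch.allclose(out_norm, n2 / (1 + n2), atol=1e-5)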
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_2, (16, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_3, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 28, 28), (12544, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(50176)](buf1, primals_3, 50176,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 3136, 4), (12544, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_1[grid(50176)](buf1, buf2,
50176, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf1
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class PrimaryCapsulesNew(nn.Module):
def __init__(self, in_features, capsules_num, capsules_dim):
super(PrimaryCapsulesNew, self).__init__()
self.in_features = in_features
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.conv = nn.Conv2d(in_features, capsules_num * capsules_dim,
kernel_size=9, stride=2)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashawkey/CapsNet.pytorch | PrimaryCapsules | false | 6,235 | ["MIT"] | 1 | 3b796b572bbabe79cc445c35913cd3584733aedf | https://github.com/ashawkey/CapsNet.pytorch/tree/3b796b572bbabe79cc445c35913cd3584733aedf | import torch
import torch.nn as nn
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class Model(nn.Module):
def __init__(self, in_features, capsules_num, capsules_dim):
super().__init__()
self.in_features = in_features
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.conv = nn.Conv2d(in_features, capsules_num * capsules_dim,
kernel_size=9, stride=2)
def forward(self, x):
batch_size = x.shape[0]
x = self.conv(x)
"""
Since all capsules use the same convolution operations, just do once and reshape.
"""
x = x.view(batch_size, -1, self.capsules_dim)
x = squash(x)
return x
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4, 4, 4]
|
MNL | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/j4/cj4rzuuszv3uecdumz6l776uuhxdahbpifl74f5qoc42wjjzlvjk.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, u, p], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# p => amax, exp, sub_1, sum_1
# sub => sub
# u => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -16), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_rsub_0 = async_compile.triton('triton_poi_fused__softmax_add_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (64*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (16 + x3 + (64*x2)), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x3 + (64*x2)), xmask)
tmp20 = tl.load(in_ptr0 + (48 + x3 + (64*x2)), xmask)
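    # The four loads step through dim 1 (stride 16) so each thread sees all softmax
    # inputs for its (batch, row, col) slot; u = x*u + (1-x)*(-16) masks items outside
    # the choice set, and the running max / sum-of-exp below give a stable softmax.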
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp3 - tmp8
tmp11 = tmp10 * tmp5
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp3 - tmp14
tmp17 = tmp16 * tmp5
tmp18 = tmp15 + tmp17
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp21 = tmp20 * tmp1
tmp22 = tmp3 - tmp20
tmp23 = tmp22 * tmp5
tmp24 = tmp21 + tmp23
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tmp26 = tmp7 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp12 - tmp25
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp18 - tmp25
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp24 - tmp25
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tl.store(out_ptr0 + (x4), tmp25, xmask)
tl.store(out_ptr1 + (x4), tmp36, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gc/cgczns5ri4wjc5eyelqtmakr7cyybwdqua77zznhsmahqlw53mkh.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, u, p, sum_1, truediv, log], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax, aten.sum, aten.div, aten.log]
# Source node to ATen node mapping:
# log => log
# mul => mul
# mul_1 => mul_1
# p => amax, div, exp, sub_1
# sub => sub
# sum_1 => sum_2
# truediv => div_1
# u => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -16), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, %sum_2), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%div_1,), kwargs = {})
triton_per_fused__softmax_add_div_log_mul_rsub_sum_1 = async_compile.triton('triton_per_fused__softmax_add_div_log_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_add_div_log_mul_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_add_div_log_mul_rsub_sum_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r4 = rindex
r0 = rindex % 4
r3 = (rindex // 64)
r5 = rindex % 16
tmp0 = tl.load(in_ptr0 + (r4), None)
tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (r5 + (16*r3)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (r5 + (16*r3)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp12 / tmp15
tmp17 = tl_math.log(tmp16)
tl.store(out_ptr1 + (tl.broadcast_to(r4, [RBLOCK])), tmp17, None)
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, u, p], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_add_mul_rsub_0.run(primals_2, primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, u, p, sum_1, truediv, log], Original ATen: [aten.mul, aten.rsub, aten.add, aten._softmax, aten.sum, aten.div, aten.log]
triton_per_fused__softmax_add_div_log_mul_rsub_sum_1.run(primals_2, primals_1, buf0, buf1, buf2, buf3, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf3, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class MNL(nn.Module):
"""
    Implementation of the MNL choice model as a PyTorch module
"""
def __init__(self, n):
super(MNL, self).__init__()
        self.u = nn.Parameter(torch.nn.init.normal_(torch.empty(n)))
self.n = n
self.m = nn.Softmax()
def forward(self, x):
"""
        Computes the MNL choice probabilities P(., S).
        Args:
            x: indicator vector for the choice set S, i.e. a 'size(S)-hot' encoding of the
                choice set, or a batch of such encodings
"""
u = x * self.u + (1 - x) * -16
p = self.m(u)
return torch.log(p / torch.sum(p))
def __str__(self):
return 'MNL'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4}]
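# --- Hedged usage sketch (added for illustration; not part of the original repo) ---
# Items outside the choice set receive utility -16, so after the softmax their
# probability is on the order of exp(-16) relative to in-set items, i.e. ~0.
def _mnl_demo():
    model = MNL(n=4)
    x = torch.tensor([[1.0, 1.0, 0.0, 0.0]])  # choice set S = {0, 1}
    u = x * model.u + (1 - x) * -16
    p = torch.softmax(u, dim=-1)
    assert p[0, 2:].sum() < 1e-3  # out-of-set mass is negligible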
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x3 = xindex % 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
tmp20 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp8 * tmp1
tmp10 = tmp3 - tmp8
tmp11 = tmp10 * tmp5
tmp12 = tmp9 + tmp11
tmp13 = triton_helpers.maximum(tmp7, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp3 - tmp14
tmp17 = tmp16 * tmp5
tmp18 = tmp15 + tmp17
tmp19 = triton_helpers.maximum(tmp13, tmp18)
tmp21 = tmp20 * tmp1
tmp22 = tmp3 - tmp20
tmp23 = tmp22 * tmp5
tmp24 = tmp21 + tmp23
tmp25 = triton_helpers.maximum(tmp19, tmp24)
tmp26 = tmp7 - tmp25
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp12 - tmp25
tmp29 = tl_math.exp(tmp28)
tmp30 = tmp27 + tmp29
tmp31 = tmp18 - tmp25
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp24 - tmp25
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tl.store(out_ptr0 + x4, tmp25, xmask)
tl.store(out_ptr1 + x4, tmp36, xmask)
@triton.jit
def triton_per_fused__softmax_add_div_log_mul_rsub_sum_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r4 = rindex
r0 = rindex % 4
r3 = rindex // 64
r5 = rindex % 16
tmp0 = tl.load(in_ptr0 + r4, None)
tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr2 + (r5 + 16 * r3), None, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr3 + (r5 + 16 * r3), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tmp12 / tmp15
tmp17 = tl_math.log(tmp16)
tl.store(out_ptr1 + tl.broadcast_to(r4, [RBLOCK]), tmp17, None)
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp15, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_add_mul_rsub_0[grid(64)](primals_2,
primals_1, buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_per_fused__softmax_add_div_log_mul_rsub_sum_1[grid(1)](
            primals_2, primals_1, buf0, buf1, buf2, buf3, 1, 256,
            num_warps=2, num_stages=1)
del buf0
del buf1
return buf3, primals_1, primals_2, buf2
class MNLNew(nn.Module):
"""
    Implementation of the MNL choice model as a PyTorch module
"""
def __init__(self, n):
super(MNLNew, self).__init__()
        self.u = nn.Parameter(torch.nn.init.normal_(torch.empty(n)))
self.n = n
self.m = nn.Softmax()
def __str__(self):
return 'MNL'
def forward(self, input_0):
primals_1 = self.u
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| arjunsesh/lrr-neurips | MNL | false | 6,236 | ["MIT"] | 1 | d78106daec1e729b02a0452f74a37bf004ed243c | https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
    Implementation of the MNL choice model as a PyTorch module
"""
def __init__(self, n):
super().__init__()
self.u = nn.Parameter(torch.nn.init.normal(torch.Tensor(n)))
self.n = n
self.m = nn.Softmax()
def forward(self, x):
"""
        Computes the MNL choice probabilities P(., S).
        Args:
            x: indicator vector for the choice set S, i.e. a 'size(S)-hot' encoding of the
                choice set, or a batch of such encodings
"""
u = x * self.u + (1 - x) * -16
p = self.m(u)
return torch.log(p / torch.sum(p))
def __str__(self):
return 'MNL'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ConcatELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ku/ckut64acx4ooap3xsoegn7zb77bu42nhykcgacotvo5y4z7hgjfr.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
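    # x1 indexes the 8 output channels of the concatenated result: channels 0-3 read
    # the input directly for ELU(x), channels 4-7 re-read it (offset by -4) and negate
    # it for ELU(-x), so both halves come from the same 4-channel input tensor.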
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tl.load(in_ptr0 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp15 & xmask, other=0.0)
tmp19 = -tmp18
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp15, tmp24, tmp25)
tmp27 = tl.where(tmp4, tmp14, tmp26)
tl.store(out_ptr0 + (x3), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ConcatELU(nn.Module):
"""Activation function that applies ELU in both direction (inverted and plain).
Allows non-linearity while providing strong gradients for any input (important for final convolution)
"""
def forward(self, x):
return torch.cat([F.elu(x), F.elu(-x)], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
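# --- Hedged usage sketch (added for illustration; not part of the original repo) ---
# ConcatELU doubles the channel dimension, and for every element one of the two
# halves sits in ELU's identity region, so max(ELU(x), ELU(-x)) recovers |x|.
def _concat_elu_demo():
    act = ConcatELU()
    x = torch.randn(2, 4, 8, 8)
    y = act(x)
    assert y.shape == (2, 8, 8, 8)
    assert torch.allclose(torch.maximum(y[:, :4], y[:, 4:]), x.abs())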
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp18 = tl.load(in_ptr0 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp15 &
xmask, other=0.0)
tmp19 = -tmp18
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp15, tmp24, tmp25)
tmp27 = tl.where(tmp4, tmp14, tmp26)
tl.store(out_ptr0 + x3, tmp27, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ConcatELUNew(nn.Module):
"""Activation function that applies ELU in both direction (inverted and plain).
Allows non-linearity while providing strong gradients for any input (important for final convolution)
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ashutoshml/lightning-tutorials | ConcatELU | false | 6,237 | ["Apache-2.0"] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Activation function that applies ELU in both direction (inverted and plain).
Allows non-linearity while providing strong gradients for any input (important for final convolution)
"""
def forward(self, x):
return torch.cat([F.elu(x), F.elu(-x)], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MLP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bd/cbdotac6ukup5jfyef3gol4xzuff4mzk4u5pqfhzizchbce25ivd.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
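    # Fused bias-add + LeakyReLU(0.1): x0 selects the per-output-feature bias, and
    # the boolean mask tmp4 is stored next to the activation for the backward pass.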
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
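def _shared_dropout_demo():
    # Hedged sketch, not part of the original module: with batch_first=True
    # the mask is drawn once per batch element and broadcast over seq_len,
    # so every sequence position is zeroed/scaled identically. The shape
    # (1, 3, 5) and p=0.5 are illustrative assumptions.
    m = SharedDropout(p=0.5)
    m.train()
    y = m(torch.ones(1, 3, 5))
    # All rows match because the (1, 1, 5) mask broadcasts over dim 1.
    assert (y[0, 0] == y[0, 1]).all()
    assert (y[0, 1] == y[0, 2]).all()
    return y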
class MLP(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
            x (~torch.Tensor):
                A tensor whose last dimension has size `n_in`.
        Returns:
            A tensor whose last dimension has size `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
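def _mlp_demo():
    # Hedged usage sketch (sizes mirror get_init_inputs() above, and are
    # assumptions): the 4 -> 4 linear layer and LeakyReLU act on the last
    # dimension, so the output keeps the input shape.
    mlp = MLP(n_in=4, n_out=4)
    out = mlp(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)
    return out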
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(256)](buf0, primals_2, buf1,
buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1
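def _check_fused_leaky_relu():
    # Hedged sanity sketch, not part of the generated module (requires a
    # CUDA device; the tolerance is an assumption): the compiled call()
    # should match eager leaky_relu(x @ W^T + b) with negative_slope=0.1.
    w = torch.randn(4, 4, device='cuda')
    b = torch.randn(4, device='cuda')
    x = torch.randn(4, 4, 4, 4, device='cuda')
    out, _, _ = call([w, b, x])
    ref = torch.nn.functional.leaky_relu(x @ w.t() + b, negative_slope=0.1)
    assert torch.allclose(out, ref, atol=1e-5)
    return out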
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class MLPNew(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashim95/parser | MLP | false | 6,238 | [
"MIT"
] | 1 | 61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | import torch
import torch.nn as nn
class SharedDropout(nn.Module):
"""
SharedDropout differs from the vanilla dropout strategy in that
the dropout mask is shared across one dimension.
Args:
p (float):
The probability of an element to be zeroed. Default: 0.5.
batch_first (bool):
If ``True``, the input and output tensors are provided as ``[batch_size, seq_len, *]``.
Default: ``True``.
Examples:
>>> x = torch.ones(1, 3, 5)
>>> nn.Dropout()(x)
tensor([[[0., 2., 2., 0., 0.],
[2., 2., 0., 2., 2.],
[2., 2., 2., 2., 0.]]])
>>> SharedDropout()(x)
tensor([[[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.],
[2., 0., 2., 0., 2.]]])
"""
def __init__(self, p=0.5, batch_first=True):
super().__init__()
self.p = p
self.batch_first = batch_first
def __repr__(self):
s = f'p={self.p}'
if self.batch_first:
s += f', batch_first={self.batch_first}'
return f'{self.__class__.__name__}({s})'
def forward(self, x):
"""
Args:
x (~torch.Tensor):
A tensor of any shape.
Returns:
The returned tensor is of the same shape as `x`.
"""
if self.training:
if self.batch_first:
mask = self.get_mask(x[:, 0], self.p).unsqueeze(1)
else:
mask = self.get_mask(x[0], self.p)
x = x * mask
return x
@staticmethod
def get_mask(x, p):
return x.new_empty(x.shape).bernoulli_(1 - p) / (1 - p)
class Model(nn.Module):
"""
Applies a linear transformation together with a non-linear activation to the incoming tensor:
:math:`y = \\mathrm{Activation}(x A^T + b)`
Args:
        n_in (int):
            The size of each input feature.
        n_out (int):
            The size of each output feature.
dropout (float):
If non-zero, introduce a :class:`SharedDropout` layer on the output with this dropout ratio. Default: 0.
activation (bool):
Whether to use activations. Default: True.
"""
def __init__(self, n_in, n_out, dropout=0, activation=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.linear = nn.Linear(n_in, n_out)
self.activation = nn.LeakyReLU(negative_slope=0.1
) if activation else nn.Identity()
self.dropout = SharedDropout(p=dropout)
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.orthogonal_(self.linear.weight)
nn.init.zeros_(self.linear.bias)
def forward(self, x):
"""
Args:
            x (~torch.Tensor):
                A tensor whose last dimension has size `n_in`.
        Returns:
            A tensor whose last dimension has size `n_out`.
"""
x = self.linear(x)
x = self.activation(x)
x = self.dropout(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BP | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/am/camfuqgvxy6bycif5dr657ji4zny6dgxoml6xmwbj4la6mrka2dq.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, utils_3, log_softmax], Original ATen: [aten.mul, aten.rsub, aten.add, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, exp, sub_1, sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# utils_3 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view_8), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -16), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__log_softmax_add_mul_rsub_0 = async_compile.triton('triton_poi_fused__log_softmax_add_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp9 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp3 - tmp8
tmp12 = tmp11 * tmp5
tmp13 = tmp10 + tmp12
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp3 - tmp15
tmp19 = tmp18 * tmp5
tmp20 = tmp17 + tmp19
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp22 * tmp23
tmp25 = tmp3 - tmp22
tmp26 = tmp25 * tmp5
tmp27 = tmp24 + tmp26
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + (x2), tmp28, xmask)
tl.store(out_ptr1 + (x2), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ci/cciwcvrwy24tefjdtz5snbseko4ourit3a6lgazrjpzviddyu4tb.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, utils_3, log_softmax], Original ATen: [aten.mul, aten.rsub, aten.add, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, log, sub_1, sub_2
# mul => mul
# mul_1 => mul_1
# sub => sub
# utils_3 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view_8), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -16), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
triton_poi_fused__log_softmax_add_mul_rsub_1 = async_compile.triton('triton_poi_fused__log_softmax_add_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_add_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp8 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp11 = tl_math.log(tmp10)
tmp12 = tmp9 - tmp11
tl.store(in_out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [utils], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [utils_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), out=buf1)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [utils_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf1, reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, utils_3, log_softmax], Original ATen: [aten.mul, aten.rsub, aten.add, aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_add_mul_rsub_0.run(primals_2, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, utils_3, log_softmax], Original ATen: [aten.mul, aten.rsub, aten.add, aten._log_softmax]
triton_poi_fused__log_softmax_add_mul_rsub_1.run(buf5, primals_2, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf3
del buf4
return (buf5, primals_2, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BP(nn.Module):
"""
    Implementation of the Batsell-Polking k-th order model as a PyTorch module
"""
def __init__(self, n, k, d):
"""
Initializes a k-th order Batsell-Polking model
Args:
            n (int): number of items in the universe
            k (int): order of the model
            d (int): rank of the model
"""
super(BP, self).__init__()
shape = tuple([n] * k)
self.U = nn.Parameter(torch.nn.init.normal_(torch.Tensor(*shape)))
self.k = k
self.n = n
self.d = d
self.m = nn.LogSoftmax()
def forward(self, x):
"""
Computes choice probabilities for k-th order BP model
"""
utils = self.U
for _ in range(self.k - 1):
utils = torch.matmul(utils, torch.squeeze(x))
utils = x * utils + (1 - x) * -16
return self.m(utils)
def __str__(self):
return 'BP:k=' + str(self.k)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n': 4, 'k': 4, 'd': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
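    # Pass 1 of the fused log-softmax over the 4 slices of dim 1: recompute
    # utils = x * u + (1 - x) * (-16) per slice, then reduce the running
    # max (out_ptr0) and the sum of exponentials (out_ptr1) for pass 2.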
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp9 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp3 - tmp8
tmp12 = tmp11 * tmp5
tmp13 = tmp10 + tmp12
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp17 = tmp15 * tmp16
tmp18 = tmp3 - tmp15
tmp19 = tmp18 * tmp5
tmp20 = tmp17 + tmp19
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp24 = tmp22 * tmp23
tmp25 = tmp3 - tmp22
tmp26 = tmp25 * tmp5
tmp27 = tmp24 + tmp26
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x2, tmp28, xmask)
tl.store(out_ptr1 + x2, tmp39, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_mul_rsub_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
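    # Pass 2: recompute the shifted logits and subtract log(sum(exp)) to
    # finish the log-softmax; the result overwrites the bmm output buffer
    # (in_out_ptr0) in place.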
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp8 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr2 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = -16.0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp11 = tl_math.log(tmp10)
tmp12 = tmp9 - tmp11
tl.store(in_out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(primals_2, (16, 4, 4), (16, 4, 1), 0
), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(primals_2, (16, 4, 4),
(16, 4, 1), 0), out=buf1)
buf2 = buf0
del buf0
extern_kernels.bmm(buf1, reinterpret_tensor(primals_2, (16, 4, 4),
(16, 4, 1), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_add_mul_rsub_0[grid(64)](primals_2,
buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused__log_softmax_add_mul_rsub_1[grid(256)](buf5,
primals_2, buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del buf4
return buf5, primals_2, buf5
class BPNew(nn.Module):
"""
    Implementation of the Batsell-Polking k-th order model as a PyTorch module
"""
def __init__(self, n, k, d):
"""
Initializes a k-th order Batsell-Polking model
Args:
            n (int): number of items in the universe
            k (int): order of the model
            d (int): rank of the model
"""
super(BPNew, self).__init__()
shape = tuple([n] * k)
self.U = nn.Parameter(torch.nn.init.normal_(torch.Tensor(*shape)))
self.k = k
self.n = n
self.d = d
self.m = nn.LogSoftmax()
def __str__(self):
return 'BP:k=' + str(self.k)
def forward(self, input_0):
primals_1 = self.U
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| arjunsesh/lrr-neurips | BP | false | 6,239 | [
"MIT"
] | 1 | d78106daec1e729b02a0452f74a37bf004ed243c | https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
Implementation of the Bastell-Polking k-th order model as a pytorch module
"""
def __init__(self, n, k, d):
"""
Initializes a k-th order Batsell-Polking model
Args:
            n (int): number of items in the universe
            k (int): order of the model
            d (int): rank of the model
"""
super().__init__()
shape = tuple([n] * k)
self.U = nn.Parameter(torch.nn.init.normal_(torch.Tensor(*shape)))
self.k = k
self.n = n
self.d = d
self.m = nn.LogSoftmax()
def forward(self, x):
"""
Computes choice probabilities for k-th order BP model
"""
utils = self.U
for _ in range(self.k - 1):
utils = torch.matmul(utils, torch.squeeze(x))
utils = x * utils + (1 - x) * -16
return self.m(utils)
def __str__(self):
return 'BP:k=' + str(self.k)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ScalarMix | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2d/c2durcbnmxziv6ccmw4s5yb6zlayyf7itflrryxft2ddkko7o6q4.py
# Topologically Sorted Source Nodes: [mul, weighted_sum, mul_1], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# weighted_sum => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%select, %select_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %add), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, weighted_sum, mul_1], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_3, primals_1, primals_2, buf0, 64, grid=grid(64), stream=stream0)
return (buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ScalarMix(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, tensors):
"""
Args:
tensors (list[~torch.Tensor]):
:math:`N` tensors to be mixed.
Returns:
The mixture of :math:`N` tensors.
"""
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
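def _scalar_mix_demo():
    # Hedged sketch, not part of the original module (n_layers=2 and the
    # tensor values are assumptions): with zero-initialized weights the
    # softmax is uniform, so the mixture is gamma * mean of the inputs.
    mix = ScalarMix(n_layers=2)
    a, b = torch.ones(2, 3), torch.zeros(2, 3)
    out = mix([a, b])
    # 0.5 * ones + 0.5 * zeros, scaled by gamma = 1.0.
    assert torch.allclose(out, torch.full((2, 3), 0.5))
    return out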
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_layers': 1}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
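    # Fused gamma * softmax(w) * x specialized to n_layers == 1: the
    # softmax of a single logit is exp(w - w) / exp(w - w) == 1, which is
    # why tmp4..tmp6 below reduce to a constant 1.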
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = tmp8 + tmp9
tmp11 = tmp1 * tmp10
tl.store(out_ptr0 + x0, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(64)](primals_3, primals_1,
primals_2, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
return buf0, primals_1, primals_3, reinterpret_tensor(primals_2, (4, 4,
4), (16, 4, 1), 0)
class ScalarMixNew(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, input_0):
primals_1 = self.weights
primals_3 = self.gamma
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashim95/parser | ScalarMix | false | 6,240 | [
"MIT"
] | 1 | 61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Computes a parameterised scalar mixture of :math:`N` tensors, :math:`mixture = \\gamma * \\sum_{k}(s_k * tensor_k)`
where :math:`s = \\mathrm{softmax}(w)`, with :math:`w` and :math:`\\gamma` scalar parameters.
Args:
n_layers (int):
The number of layers to be mixed, i.e., :math:`N`.
dropout (float):
The dropout ratio of the layer weights.
If dropout > 0, then for each scalar weight, adjust its softmax weight mass to 0
with the dropout probability (i.e., setting the unnormalized weight to -inf).
This effectively redistributes the dropped probability mass to all other weights.
Default: 0.
"""
def __init__(self, n_layers, dropout=0):
super().__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def __repr__(self):
s = f'n_layers={self.n_layers}'
if self.dropout.p > 0:
s += f', dropout={self.dropout.p}'
return f'{self.__class__.__name__}({s})'
def forward(self, tensors):
"""
Args:
tensors (list[~torch.Tensor]):
:math:`N` tensors to be mixed.
Returns:
The mixture of :math:`N` tensors.
"""
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [1]
|
CapsuleLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pr/cprzmk46lzbq3wwm3fdlxhtbnwymkr6wunytnriiqf3dz27ndnvh.py
# Topologically Sorted Source Nodes: [sub, relu, left, mul, sub_2, mul_1, sub_1, relu_1, right, mul_2, margin_loss, margin_loss_1], Original ATen: [aten.rsub, aten.relu, aten.pow, aten.mul, aten.sub, aten.add, aten.sum]
# Source node to ATen node mapping:
# left => pow_1
# margin_loss => add
# margin_loss_1 => sum_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# relu => relu
# relu_1 => relu_1
# right => pow_2
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.9, %arg1_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %pow_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg2_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 0.1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu_1, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %pow_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp5 * tmp5
tmp7 = tmp0 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp0
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.1
tmp13 = tmp1 - tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp11 * tmp15
tmp17 = tmp7 + tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2i/c2iudvf7nvwf6e57k65u5c7ley2r4yshxdnqzbds6k36qn535ehv.py
# Topologically Sorted Source Nodes: [sub_3, pow_3, recons_loss], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_3 => pow_3
# recons_loss => sum_2
# sub_3 => sub_3
# Graph fragment:
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %arg3_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_3,), kwargs = {})
triton_red_fused_pow_sub_sum_1 = async_compile.triton('triton_red_fused_pow_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[8, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 8
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 % 4096), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (8192*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = _tmp5 + tmp4
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = tl.sum(_tmp5, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tw/ctw54bhhmc22e2rmlozzpqh6hw65h3wqmyh357ii2bgalesstcsq.py
# Topologically Sorted Source Nodes: [sub_3, pow_3, recons_loss, mul_3, add_1, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add_1 => add_1
# mul_3 => mul_3
# pow_3 => pow_3
# recons_loss => sum_2
# sub_3 => sub_3
# truediv => div
# Graph fragment:
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %arg3_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_3,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 0.0005), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_3), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, 4), kwargs = {})
triton_per_fused_add_div_mul_pow_sub_sum_2 = async_compile.triton('triton_per_fused_add_div_mul_pow_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 8],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_pow_sub_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_out_ptr0 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 0.0005
tmp7 = tmp3 * tmp6
tmp8 = tmp5 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 64), (1024, 256, 64, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 1024), (16384, 4096, 1024, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub, relu, left, mul, sub_2, mul_1, sub_1, relu_1, right, mul_2, margin_loss, margin_loss_1], Original ATen: [aten.rsub, aten.relu, aten.pow, aten.mul, aten.sub, aten.add, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0.run(arg2_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((8, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [sub_3, pow_3, recons_loss], Original ATen: [aten.sub, aten.pow, aten.sum]
triton_red_fused_pow_sub_sum_1.run(arg0_1, arg3_1, buf1, 8, 8192, grid=grid(8), stream=stream0)
del arg0_1
del arg3_1
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub_3, pow_3, recons_loss, mul_3, add_1, truediv], Original ATen: [aten.sub, aten.pow, aten.sum, aten.mul, aten.add, aten.div]
triton_per_fused_add_div_mul_pow_sub_sum_2.run(buf3, buf1, 1, 8, grid=grid(1), stream=stream0)
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 64), (1024, 256, 64, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 1024), (16384, 4096, 1024, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CapsuleLoss(nn.Module):
def __init__(self):
super(CapsuleLoss, self).__init__()
def forward(self, inputs, labels, logits, recons):
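        # Margin loss from Sabour et al. (2017): m+ = 0.9, m- = 0.1,
        # lambda = 0.5; the reconstruction MSE is down-weighted by 0.0005
        # and the total is averaged over the batch.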
batch_size = inputs.shape[0]
left = F.relu(0.9 - logits, inplace=True) ** 2
right = F.relu(logits - 0.1, inplace=True) ** 2
margin_loss = labels * left + 0.5 * (1.0 - labels) * right
margin_loss = margin_loss.sum()
recons_loss = ((inputs.view(batch_size, -1) - recons) ** 2).sum()
return (margin_loss + 0.0005 * recons_loss) / batch_size
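def _capsule_loss_demo():
    # Hedged sketch using the shapes from get_inputs() below; the values
    # are random, so only the scalar-ness of the result is checked.
    crit = CapsuleLoss()
    loss = crit(torch.rand(4, 4, 4, 64), torch.rand(4, 4, 4, 4),
                torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 1024))
    assert loss.dim() == 0
    return loss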
def get_inputs():
return [torch.rand([4, 4, 4, 64]), torch.rand([4, 4, 4, 4]), torch.rand
([4, 4, 4, 4]), torch.rand([4, 4, 4, 1024])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0(in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
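    # Single-block reduction of the margin loss over all 256 elements:
    # tmp0 plays the label role and tmp1 the capsule length, giving
    # label * relu(0.9 - len)^2 + 0.5 * (1 - label) * relu(len - 0.1)^2.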
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp5 * tmp5
tmp7 = tmp0 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp0
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.1
tmp13 = tmp1 - tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp11 * tmp15
tmp17 = tmp7 + tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
@triton.jit
def triton_red_fused_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
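    # Two-stage reduction of sum((inputs.view(B, -1) - recons)^2): each of
    # 8 programs accumulates 8192 squared residuals; the r1 % 4096 load
    # realizes the broadcast of the flattened (4, 1024) inputs view
    # against the (4, 4, 4, 1024) recons tensor.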
xnumel = 8
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp5 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + r1 % 4096, rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 8192 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = _tmp5 + tmp4
_tmp5 = tl.where(rmask & xmask, tmp6, _tmp5)
tmp5 = tl.sum(_tmp5, 1)[:, None]
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_per_fused_add_div_mul_pow_sub_sum_2(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_out_ptr0 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, 1])
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.sum(tmp1, 1)[:, None]
tmp6 = 0.0005
tmp7 = tmp3 * tmp6
tmp8 = tmp5 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 64), (1024, 256, 64, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 1024), (16384, 4096, 1024, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0[grid(1)](arg2_1,
arg1_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((8,), (1,), torch.float32)
triton_red_fused_pow_sub_sum_1[grid(8)](arg0_1, arg3_1, buf1, 8,
8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del arg0_1
del arg3_1
buf3 = buf0
del buf0
triton_per_fused_add_div_mul_pow_sub_sum_2[grid(1)](buf3, buf1, 1,
8, XBLOCK=1, num_warps=2, num_stages=1)
del buf1
return buf3,
class CapsuleLossNew(nn.Module):
def __init__(self):
super(CapsuleLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| ashawkey/CapsNet.pytorch | CapsuleLoss | false | 6,241 | [
"MIT"
] | 1 | 3b796b572bbabe79cc445c35913cd3584733aedf | https://github.com/ashawkey/CapsNet.pytorch/tree/3b796b572bbabe79cc445c35913cd3584733aedf | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs, labels, logits, recons):
batch_size = inputs.shape[0]
left = F.relu(0.9 - logits, inplace=True) ** 2
right = F.relu(logits - 0.1, inplace=True) ** 2
margin_loss = labels * left + 0.5 * (1.0 - labels) * right
margin_loss = margin_loss.sum()
recons_loss = ((inputs.view(batch_size, -1) - recons) ** 2).sum()
return (margin_loss + 0.0005 * recons_loss) / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 64]), torch.rand([4, 4, 4, 4]), torch.rand
([4, 4, 4, 4]), torch.rand([4, 4, 4, 1024])]
def get_init_inputs():
return []
|
BCEIoULoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yd/cydbgmbwdsyrgveumq4ircubrsy73dwqmiav66uegjnj6nwfzt5v.py
# Topologically Sorted Source Nodes: [outputs, mul, intersection, sum_2, sum_3], Original ATen: [aten.sigmoid, aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => sum_1
# mul => mul
# outputs => sigmoid
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sigmoid), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0, 2, 3]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg1_1, [0, 2, 3]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sigmoid, [0, 2, 3]), kwargs = {})
triton_per_fused_mul_sigmoid_sum_0 = async_compile.triton('triton_per_fused_mul_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sigmoid_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
tl.store(out_ptr2 + (x0), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rv/crv7ncr5fo3dojyw5tg6eobls6nb4rshhz6qqlti5dtgcqhbt7o7.py
# Topologically Sorted Source Nodes: [union, eq, float_1, mul_1, add_1, sub, add_2, iou_score, iou], Original ATen: [aten.add, aten.eq, aten._to_copy, aten.mul, aten.sub, aten.div, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# eq => eq
# float_1 => convert_element_type
# iou => mean
# iou_score => div
# mul_1 => mul_1
# sub => sub
# union => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1e-07), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %sum_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1 = async_compile.triton('triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tl.load(in_ptr2 + (r0), None)
tmp3 = tmp1 + tmp2
tmp4 = 0.0
tmp5 = tmp3 == tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = 1e-07
tmp8 = tmp6 * tmp7
tmp9 = tmp0 + tmp8
tmp10 = tmp3 - tmp0
tmp11 = tmp10 + tmp7
tmp12 = tmp9 / tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwei3fvazm5uavbfdb3c4oeyaid6eotsstmmsiewhvrjhbkh7ek.py
# Topologically Sorted Source Nodes: [union, eq, float_1, mul_1, add_1, sub, add_2, iou_score, iou, iou_1, bce, loss], Original ATen: [aten.add, aten.eq, aten._to_copy, aten.mul, aten.sub, aten.div, aten.mean, aten.rsub, aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# bce => abs_1, exp, full_default, log1p, mean_1, minimum, mul_2, neg, sub_2, sub_3, sub_4
# eq => eq
# float_1 => convert_element_type
# iou => mean
# iou_1 => sub_1
# iou_score => div
# loss => add_3
# mul_1 => mul_1
# sub => sub
# union => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 1e-07), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %sum_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%div,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mean), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %arg0_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %sub_3), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_4,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %mean_1), kwargs = {})
triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2 = async_compile.triton('triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp16 = tl.load(in_out_ptr0 + (0))
tmp17 = tl.broadcast_to(tmp16, [1])
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp18 = 4.0
tmp19 = tmp17 / tmp18
tmp20 = tmp1 - tmp19
tmp21 = 256.0
tmp22 = tmp15 / tmp21
tmp23 = tmp20 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [outputs, mul, intersection, sum_2, sum_3], Original ATen: [aten.sigmoid, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sigmoid_sum_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [union, eq, float_1, mul_1, add_1, sub, add_2, iou_score, iou], Original ATen: [aten.add, aten.eq, aten._to_copy, aten.mul, aten.sub, aten.div, aten.mean]
triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1.run(buf0, buf1, buf2, buf3, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [union, eq, float_1, mul_1, add_1, sub, add_2, iou_score, iou, iou_1, bce, loss], Original ATen: [aten.add, aten.eq, aten._to_copy, aten.mul, aten.sub, aten.div, aten.mean, aten.rsub, aten.binary_cross_entropy_with_logits]
triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2.run(buf5, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Callable
from functools import partial
from torch import nn
import torch.distributed
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.backends
def get_activation_fn(activation: 'str'=None):
"""Returns the activation function from ``torch.nn`` by its name."""
if activation is None or activation.lower() == 'none':
def activation_fn(x):
return x
else:
activation_fn = torch.nn.__dict__[activation]()
return activation_fn
def wrap_metric_fn_with_activation(metric_fn: 'Callable', activation: 'str'
=None):
"""Wraps model outputs for ``metric_fn` with specified ``activation``.
Args:
metric_fn: metric function to compute
activation: activation name to use
Returns:
wrapped metric function with wrapped model outputs
.. note::
Works only with ``metric_fn`` like
``metric_fn(outputs, targets, *args, **kwargs)``.
"""
activation_fn = get_activation_fn(activation)
def wrapped_metric_fn(outputs: 'torch.Tensor', targets: 'torch.Tensor',
*args, **kwargs):
outputs = activation_fn(outputs)
output = metric_fn(outputs, targets, *args, **kwargs)
return output
return wrapped_metric_fn
def iou(outputs: 'torch.Tensor', targets: 'torch.Tensor', class_dim: 'int'=
1, threshold: 'float'=None, eps: 'float'=1e-07) ->torch.Tensor:
"""Computes the iou/jaccard score.
Args:
outputs: [N; K; ...] tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
        targets: binary [N; K; ...] tensor that encodes which of the K
            classes are associated with the N-th input
        class_dim: indicates class dimension (K) for
``outputs`` and ``targets`` tensors (default = 1)
threshold: threshold for outputs binarization
eps: epsilon to avoid zero division
Returns:
IoU (Jaccard) score
Examples:
>>> size = 4
>>> half_size = size // 2
>>> shape = (1, 1, size, size)
>>> empty = torch.zeros(shape)
>>> full = torch.ones(shape)
>>> left = torch.ones(shape)
>>> left[:, :, :, half_size:] = 0
>>> right = torch.ones(shape)
>>> right[:, :, :, :half_size] = 0
>>> top_left = torch.zeros(shape)
>>> top_left[:, :, :half_size, :half_size] = 1
>>> pred = torch.cat([empty, left, empty, full, left, top_left], dim=1)
>>> targets = torch.cat([full, right, empty, full, left, left], dim=1)
>>> iou(
>>> outputs=pred,
>>> targets=targets,
>>> class_dim=1,
>>> threshold=0.5,
>>> )
    tensor([0.0000, 0.0000, 1.0000, 1.0000, 1.0000, 0.5000])
"""
if threshold is not None:
outputs = (outputs > threshold).float()
num_dims = len(outputs.shape)
assert num_dims > 2, 'shape mismatch, please check the docs for more info'
assert outputs.shape == targets.shape, 'shape mismatch, please check the docs for more info'
dims = list(range(num_dims))
if class_dim < 0:
class_dim = num_dims + class_dim
dims.pop(class_dim)
sum_fn = partial(torch.sum, dim=dims)
intersection = sum_fn(targets * outputs)
union = sum_fn(targets) + sum_fn(outputs)
iou_score = (intersection + eps * (union == 0).float()) / (union -
intersection + eps)
return iou_score
class IoULoss(nn.Module):
"""The intersection over union (Jaccard) loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
"""
super().__init__()
metric_fn = wrap_metric_fn_with_activation(metric_fn=iou,
activation=activation)
self.loss_fn = partial(metric_fn, eps=eps, threshold=threshold)
def forward(self, outputs, targets):
"""@TODO: Docs. Contribution is welcome."""
per_class_iou = self.loss_fn(outputs, targets)
iou = torch.mean(per_class_iou)
return 1 - iou
class BCEIoULoss(nn.Module):
"""The Intersection over union (Jaccard) with BCE loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid', reduction: 'str'='mean'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
reduction: Specifies the reduction to apply
to the output of BCE
"""
super().__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
self.iou_loss = IoULoss(eps, threshold, activation)
def forward(self, outputs, targets):
"""@TODO: Docs. Contribution is welcome."""
iou = self.iou_loss.forward(outputs, targets)
bce = self.bce_loss(outputs, targets)
loss = iou + bce
return loss
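# Hedged usage sketch (added; shapes and names below are assumptions):
# criterion = BCEIoULoss()
# logits = torch.randn(2, 3, 8, 8)  # raw model outputs, pre-sigmoid
# targets = torch.randint(0, 2, (2, 3, 8, 8)).float()  # binary masks
# loss = criterion(logits, targets)  # scalar: (1 - mean IoU) + BCE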
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import Callable
from functools import partial
from torch import nn
import torch.distributed
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.backends
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sigmoid_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
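    # Per-class IoU ingredients, reduced over dims [0, 2, 3]: the sums of
    # targets * sigmoid(outputs), targets, and sigmoid(outputs) below.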
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.load(in_ptr2 + r0, None)
tmp3 = tmp1 + tmp2
tmp4 = 0.0
tmp5 = tmp3 == tmp4
tmp6 = tmp5.to(tl.float32)
tmp7 = 1e-07
tmp8 = tmp6 * tmp7
tmp9 = tmp0 + tmp8
tmp10 = tmp3 - tmp0
tmp11 = tmp10 + tmp7
tmp12 = tmp9 / tmp11
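    # iou_score = (intersection + eps * (union == 0)) / (union - intersection
    # + eps); the sum over the 4 classes becomes a mean once the next kernel
    # divides by 4.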
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
@triton.jit
def triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp16 = tl.load(in_out_ptr0 + 0)
tmp17 = tl.broadcast_to(tmp16, [1])
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
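    # Numerically stable BCE with logits: (1 - t) * x - (min(0, x)
    # - log1p(exp(-|x|))) == max(x, 0) - x * t + log(1 + exp(-|x|)).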
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp18 = 4.0
tmp19 = tmp17 / tmp18
tmp20 = tmp1 - tmp19
tmp21 = 256.0
tmp22 = tmp15 / tmp21
tmp23 = tmp20 + tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sigmoid_sum_0[grid(4)](arg1_1, arg0_1, buf0,
buf1, buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf3 = empty_strided_cuda((), (), torch.float32)
triton_per_fused__to_copy_add_div_eq_mean_mul_sub_1[grid(1)](buf0,
buf1, buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
buf5 = buf3
del buf3
triton_per_fused__to_copy_add_binary_cross_entropy_with_logits_div_eq_mean_mul_rsub_sub_2[
grid(1)](buf5, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf5,
def get_activation_fn(activation: 'str'=None):
"""Returns the activation function from ``torch.nn`` by its name."""
if activation is None or activation.lower() == 'none':
def activation_fn(x):
return x
else:
activation_fn = torch.nn.__dict__[activation]()
return activation_fn
def wrap_metric_fn_with_activation(metric_fn: 'Callable', activation: 'str'
=None):
"""Wraps model outputs for ``metric_fn` with specified ``activation``.
Args:
metric_fn: metric function to compute
activation: activation name to use
Returns:
wrapped metric function with wrapped model outputs
.. note::
Works only with ``metric_fn`` like
``metric_fn(outputs, targets, *args, **kwargs)``.
"""
activation_fn = get_activation_fn(activation)
def wrapped_metric_fn(outputs: 'torch.Tensor', targets: 'torch.Tensor',
*args, **kwargs):
outputs = activation_fn(outputs)
output = metric_fn(outputs, targets, *args, **kwargs)
return output
return wrapped_metric_fn
def iou(outputs: 'torch.Tensor', targets: 'torch.Tensor', class_dim: 'int'=
1, threshold: 'float'=None, eps: 'float'=1e-07) ->torch.Tensor:
"""Computes the iou/jaccard score.
Args:
outputs: [N; K; ...] tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
        targets: binary [N; K; ...] tensor that encodes which of the K
            classes are associated with the N-th input
        class_dim: indicates class dimension (K) for
``outputs`` and ``targets`` tensors (default = 1)
threshold: threshold for outputs binarization
eps: epsilon to avoid zero division
Returns:
IoU (Jaccard) score
Examples:
>>> size = 4
>>> half_size = size // 2
>>> shape = (1, 1, size, size)
>>> empty = torch.zeros(shape)
>>> full = torch.ones(shape)
>>> left = torch.ones(shape)
>>> left[:, :, :, half_size:] = 0
>>> right = torch.ones(shape)
>>> right[:, :, :, :half_size] = 0
>>> top_left = torch.zeros(shape)
>>> top_left[:, :, :half_size, :half_size] = 1
>>> pred = torch.cat([empty, left, empty, full, left, top_left], dim=1)
>>> targets = torch.cat([full, right, empty, full, left, left], dim=1)
>>> iou(
>>> outputs=pred,
>>> targets=targets,
>>> class_dim=1,
>>> threshold=0.5,
>>> )
    tensor([0.0000, 0.0000, 1.0000, 1.0000, 1.0000, 0.5000])
"""
if threshold is not None:
outputs = (outputs > threshold).float()
num_dims = len(outputs.shape)
assert num_dims > 2, 'shape mismatch, please check the docs for more info'
assert outputs.shape == targets.shape, 'shape mismatch, please check the docs for more info'
dims = list(range(num_dims))
if class_dim < 0:
class_dim = num_dims + class_dim
dims.pop(class_dim)
sum_fn = partial(torch.sum, dim=dims)
intersection = sum_fn(targets * outputs)
union = sum_fn(targets) + sum_fn(outputs)
iou_score = (intersection + eps * (union == 0).float()) / (union -
intersection + eps)
return iou_score
class IoULoss(nn.Module):
"""The intersection over union (Jaccard) loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
"""
super().__init__()
metric_fn = wrap_metric_fn_with_activation(metric_fn=iou,
activation=activation)
self.loss_fn = partial(metric_fn, eps=eps, threshold=threshold)
def forward(self, outputs, targets):
"""@TODO: Docs. Contribution is welcome."""
per_class_iou = self.loss_fn(outputs, targets)
iou = torch.mean(per_class_iou)
return 1 - iou
class BCEIoULossNew(nn.Module):
"""The Intersection over union (Jaccard) with BCE loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid', reduction: 'str'='mean'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
            activation: A torch.nn activation applied to the outputs.
Must be one of ``'none'``, ``'Sigmoid'``, ``'Softmax'``
reduction: Specifies the reduction to apply
to the output of BCE
"""
super().__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
self.iou_loss = IoULoss(eps, threshold, activation)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| arjunshibu/catalyst | BCEIoULoss | false | 6,242 | [
"Apache-2.0"
] | 1 | 7160540f09530b803e5664e57db3e951fdc4dab3 | https://github.com/arjunshibu/catalyst/tree/7160540f09530b803e5664e57db3e951fdc4dab3 | import torch
from typing import Callable
from functools import partial
from torch import nn
import torch.distributed
from torch.nn.modules.loss import *
from torch.nn.modules import *
from torch.optim import *
from torch.optim.lr_scheduler import *
import torch.backends
def get_activation_fn(activation: 'str'=None):
"""Returns the activation function from ``torch.nn`` by its name."""
if activation is None or activation.lower() == 'none':
def activation_fn(x):
return x
else:
activation_fn = torch.nn.__dict__[activation]()
return activation_fn
def wrap_metric_fn_with_activation(metric_fn: 'Callable', activation: 'str'
=None):
"""Wraps model outputs for ``metric_fn` with specified ``activation``.
Args:
metric_fn: metric function to compute
activation: activation name to use
Returns:
wrapped metric function with wrapped model outputs
.. note::
Works only with ``metric_fn`` like
``metric_fn(outputs, targets, *args, **kwargs)``.
"""
activation_fn = get_activation_fn(activation)
def wrapped_metric_fn(outputs: 'torch.Tensor', targets: 'torch.Tensor',
*args, **kwargs):
outputs = activation_fn(outputs)
output = metric_fn(outputs, targets, *args, **kwargs)
return output
return wrapped_metric_fn
def iou(outputs: 'torch.Tensor', targets: 'torch.Tensor', class_dim: 'int'=
1, threshold: 'float'=None, eps: 'float'=1e-07) ->torch.Tensor:
"""Computes the iou/jaccard score.
Args:
outputs: [N; K; ...] tensor that for each of the N examples
indicates the probability of the example belonging to each of
the K classes, according to the model.
        targets: binary [N; K; ...] tensor that encodes which of the K
            classes are associated with the N-th input
        class_dim: indicates class dimension (K) for
``outputs`` and ``targets`` tensors (default = 1)
threshold: threshold for outputs binarization
eps: epsilon to avoid zero division
Returns:
IoU (Jaccard) score
Examples:
>>> size = 4
>>> half_size = size // 2
>>> shape = (1, 1, size, size)
>>> empty = torch.zeros(shape)
>>> full = torch.ones(shape)
>>> left = torch.ones(shape)
>>> left[:, :, :, half_size:] = 0
>>> right = torch.ones(shape)
>>> right[:, :, :, :half_size] = 0
>>> top_left = torch.zeros(shape)
>>> top_left[:, :, :half_size, :half_size] = 1
>>> pred = torch.cat([empty, left, empty, full, left, top_left], dim=1)
>>> targets = torch.cat([full, right, empty, full, left, left], dim=1)
>>> iou(
>>> outputs=pred,
>>> targets=targets,
>>> class_dim=1,
>>> threshold=0.5,
>>> )
    tensor([0.0000, 0.0000, 1.0000, 1.0000, 1.0000, 0.5000])
"""
if threshold is not None:
outputs = (outputs > threshold).float()
num_dims = len(outputs.shape)
assert num_dims > 2, 'shape mismatch, please check the docs for more info'
assert outputs.shape == targets.shape, 'shape mismatch, please check the docs for more info'
dims = list(range(num_dims))
if class_dim < 0:
class_dim = num_dims + class_dim
dims.pop(class_dim)
sum_fn = partial(torch.sum, dim=dims)
intersection = sum_fn(targets * outputs)
union = sum_fn(targets) + sum_fn(outputs)
iou_score = (intersection + eps * (union == 0).float()) / (union -
intersection + eps)
return iou_score
class IoULoss(nn.Module):
"""The intersection over union (Jaccard) loss.
@TODO: Docs. Contribution is welcome.
"""
def __init__(self, eps: 'float'=1e-07, threshold: 'float'=None,
activation: 'str'='Sigmoid'):
"""
Args:
eps: epsilon to avoid zero division
threshold: threshold for outputs binarization
# ... truncated (>4000 chars) for memory efficiency |
Sigmoid | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/s5/cs5setzt5cmkbun4rggfce2rvlrs4lrmfpzur5emo5zhlygitedc.py
# Topologically Sorted Source Nodes: [neg, exp, add, truediv], Original ATen: [aten.neg, aten.exp, aten.add, aten.reciprocal, aten.mul]
# Source node to ATen node mapping:
# add => add
# exp => exp
# neg => neg
# truediv => mul, reciprocal
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1), kwargs = {})
triton_poi_fused_add_exp_mul_neg_reciprocal_0 = async_compile.triton('triton_poi_fused_add_exp_mul_neg_reciprocal_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_neg_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_neg_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tmp2 = tl_math.exp(tmp1)
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp5 / tmp4
tmp7 = tmp6 * tmp3
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, exp, add, truediv], Original ATen: [aten.neg, aten.exp, aten.add, aten.reciprocal, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_mul_neg_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Sigmoid(ActivationFunction):
def forward(self, x):
return 1 / (1 + torch.exp(-x))
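# Note (added): this hand-rolled form matches torch.sigmoid(x) in value, but
# torch.exp(-x) overflows to inf for large negative x; the forward still
# returns 0 there, yet autograd through the inf intermediate can yield NaN
# gradients, so torch.sigmoid is the safer choice.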
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_mul_neg_reciprocal_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tl_math.exp(tmp1)
tmp3 = 1.0
tmp4 = tmp2 + tmp3
tmp5 = tl.full([1], 1, tl.int32)
tmp6 = tmp5 / tmp4
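    # Computes 1 / (1 + exp(-x)); the * 1.0 below is left over from the
    # traced reciprocal-then-mul decomposition of the division.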
tmp7 = tmp6 * tmp3
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_exp_mul_neg_reciprocal_0[grid(256)](arg0_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class SigmoidNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ashutoshml/lightning-tutorials | Sigmoid | false | 6,243 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Model(ActivationFunction):
def forward(self, x):
return 1 / (1 + torch.exp(-x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
L2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ql/cqlak5tz3s7deubsy52az4l7hpzcb4ekrbzbw4nqi6gbd7v3ukso.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, truediv, x], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# norm => add
# pow_1 => pow_1
# sqrt => sqrt
# sum_1 => sum_1
# truediv => div
# x => mul
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, truediv, x], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class L2Norm(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = x / norm * self.weight.view(1, -1, 1, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
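    # Channel dim (size 4) is unrolled: tmp1..tmp9 are the four channel
    # values at this spatial position; norm = sqrt(sum of squares) + 1e-10,
    # then x / norm is rescaled by the learned per-channel weight tmp16.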
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_1,
primals_2, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale=1.0):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| ashuk203/face-alignment | L2Norm | false | 6,244 | [
"BSD-3-Clause"
] | 1 | 1f6452ae05ede0db9bbc48331d67d8b239fa9994 | https://github.com/ashuk203/face-alignment/tree/1f6452ae05ede0db9bbc48331d67d8b239fa9994 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_channels, scale=1.0):
super().__init__()
self.n_channels = n_channels
self.scale = scale
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.weight.data *= 0.0
self.weight.data += self.scale
def forward(self, x):
norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
x = x / norm * self.weight.view(1, -1, 1, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Biaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bc/cbcett6ey62xkijoadrmiqwnmvdqa242vrdqwxiw4pvecwqjoged.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf2, 80, grid=grid(80), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4), (20, 1, 5), 0), out=buf3)
del buf1
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0), reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
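# Editorial reading note (hedged): for n_out=1 the graph above lowers
# einsum('bxi,oij,byj->boxy', x, W, y) into two batched matmuls -- first
# (x with an appended ones column) @ W, then that product @ (y with an
# appended ones column)^T -- with triton_poi_fused_cat_0 materializing the
# bias column for both operands.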
| import torch
import torch.nn as nn
class Biaffine(nn.Module):
"""
Biaffine layer for first-order scoring.
    This layer has a weight tensor :math:`W` and optional bias terms.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4}]
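# A minimal equivalence sketch (editorial; `_check_biaffine_equivalence` is an
# illustrative helper, not part of the source repo): with both bias terms the
# biaffine score reduces to xb @ W[0] @ yb^T over the bias-augmented inputs.
def _check_biaffine_equivalence():
    layer = Biaffine(n_in=4)
    torch.nn.init.normal_(layer.weight)  # reset_parameters zeroes the weight
    x, y = torch.rand(2, 3, 4), torch.rand(2, 3, 4)
    xb = torch.cat((x, torch.ones_like(x[..., :1])), -1)
    yb = torch.cat((y, torch.ones_like(y[..., :1])), -1)
    s_bmm = xb @ layer.weight[0] @ yb.transpose(-1, -2)
    assert torch.allclose(layer(x, y), s_bmm, atol=1e-6)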
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
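    # The branchless select below reproduces torch.cat((x, ones), -1):
    # lanes with x0 < 4 copy the input element, while the x0 == 4 lane
    # (the appended bias column) is filled with the constant 1.0.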
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask,
        eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (1, 5, 5), (25, 5, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_1, buf0, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 5), (80, 5, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (1, 16, 5), (0, 5, 1), 0),
            primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 5), (20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(80)](primals_2, buf2, 80, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf2, reinterpret_tensor(buf1, (4, 5, 4),
            (20, 1, 5), 0), out=buf3)
del buf1
    return (reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0),
        reinterpret_tensor(buf2, (4, 5, 4), (20, 1, 5), 0),
        reinterpret_tensor(buf0, (1, 5, 16), (80, 1, 5), 0))
class BiaffineNew(nn.Module):
"""
Biaffine layer for first-order scoring.
    This layer has a weight tensor :math:`W` and optional bias terms.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1):
primals_3 = self.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashim95/parser | Biaffine | false | 6,245 | [
"MIT"
] | 1 | 61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Biaffine layer for first-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y)` of the vector pair :math:`(x, y)` is computed as :math:`x^T W y`,
in which :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Timothy Dozat and Christopher D. Manning. 2017.
`Deep Biaffine Attention for Neural Dependency Parsing`_.
Args:
n_in (int):
The size of the input feature.
n_out (int):
The number of output channels.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``True``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``True``.
.. _Deep Biaffine Attention for Neural Dependency Parsing:
https://openreview.net/forum?id=Hk95PK9le
"""
def __init__(self, n_in, n_out=1, bias_x=True, bias_y=True):
super().__init__()
self.n_in = n_in
self.n_out = n_out
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_out, n_in + bias_x, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}, n_out={self.n_out}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, n_out, seq_len, seq_len]``.
If ``n_out=1``, the dimension for ``n_out`` will be squeezed automatically.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
s = torch.einsum('bxi,oij,byj->boxy', x, self.weight, y)
s = s.squeeze(1)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
CDM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ap/capazs53vf7hafcoa3lzpsnuw7vvpg5foinomftp5aheilxucgrj.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %primals_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vn/cvnoov3lkfe6y4psdhd435gmyla4lnjgqgpu4mqobs4f7e76owlf.py
# Topologically Sorted Source Nodes: [sum_1, mul_1, sub, mul_2, u], Original ATen: [aten.sum, aten.mul, aten.rsub, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_1
# mul_2 => mul_2
# sub => sub
# sum_1 => sum_1
# u => add
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mm, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sum_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %primals_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, -16), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_sum_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp0
tmp11 = -16.0
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gd/cgdq755g3clp3t5icrbudwx4ir4xygtoz6ug4jo2euegtyg5mdnp.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# p => amax, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [1], True), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/j5/cj5hwj7ockkcleq56wmrpwxavcu7lllqodtnsxnd6sbzznn7lu6j.py
# Topologically Sorted Source Nodes: [p], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# p => exp, log, sub_2, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_1, %log), kwargs = {})
triton_poi_fused__log_softmax_3 = async_compile.triton('triton_poi_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_3, primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, mul_1, sub, mul_2, u], Original ATen: [aten.sum, aten.mul, aten.rsub, aten.add]
triton_poi_fused_add_mul_rsub_sum_1.run(primals_3, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [p], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [p], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_3.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
del buf3
return (buf4, primals_3, buf4, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class CDM(nn.Module):
"""
    Implementation of the CDM choice model as a PyTorch module
"""
def __init__(self, n, d):
"""
Initializes a CDM model
Args:
n- number of items in the universe
d- number of dimensions for feature and context embeddings
"""
super(CDM, self).__init__()
        # normal_ and an explicit dim replace deprecated calls; for 2-D inputs
        # dim=1 matches the old implicit LogSoftmax behavior.
        self.fs = nn.Parameter(torch.nn.init.normal_(torch.Tensor(n, d)))
        self.cs = nn.Parameter(torch.nn.init.normal_(torch.Tensor(d, n)))
        self.m = nn.LogSoftmax(dim=1)
self.d = d
self.n = n
def forward(self, x):
"""
computes the CDM choice probabilities P(.,S)
Args:
x- indicator vector for choice set S, i.e. a 'size(S)-hot' encoding of the
choice set, or a batch of these
"""
        u = x * torch.sum(torch.mm(self.fs, x * self.cs), dim=1) + (1 - x) * -16
p = self.m(u)
return p
def __str__(self):
return 'CDM-d=' + str(self.d)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'n': 4, 'd': 4}]
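# Hedged illustration (editorial helper, not from the source repo): scoring a
# single choice set shows how the -16 offset drives items outside S toward
# near-zero probability after the log-softmax.
def _cdm_demo():
    torch.manual_seed(0)
    model = CDM(n=4, d=4)
    s = torch.tensor([[1.0, 1.0, 0.0, 1.0]])  # choice set {0, 1, 3}
    print(model(s).exp())  # item 2's probability collapses toward zero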
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
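    # in_ptr1 holds mm(fs, x * cs); the row reduction below recreates
    # torch.sum(..., dim=1), and the trailing (1 - x) * -16 term masks
    # items absent from the choice set, matching CDM.forward.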
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 * tmp7
tmp9 = 1.0
tmp10 = tmp9 - tmp0
tmp11 = -16.0
tmp12 = tmp10 * tmp11
tmp13 = tmp8 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_3, primals_2, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_rsub_sum_1[grid(16)](primals_3, buf1, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused__log_softmax_2[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__log_softmax_3[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
    return (buf4, primals_3, buf4,
        reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
        reinterpret_tensor(buf0, (4, 4), (1, 4), 0))
class CDMNew(nn.Module):
"""
    Implementation of the CDM choice model as a PyTorch module
"""
def __init__(self, n, d):
"""
Initializes a CDM model
Args:
n- number of items in the universe
d- number of dimensions for feature and context embeddings
"""
super(CDMNew, self).__init__()
        # normal_ and an explicit dim replace deprecated calls; for 2-D inputs
        # dim=1 matches the old implicit LogSoftmax behavior.
        self.fs = nn.Parameter(torch.nn.init.normal_(torch.Tensor(n, d)))
        self.cs = nn.Parameter(torch.nn.init.normal_(torch.Tensor(d, n)))
        self.m = nn.LogSoftmax(dim=1)
self.d = d
self.n = n
def __str__(self):
return 'CDM-d=' + str(self.d)
def forward(self, input_0):
primals_1 = self.fs
primals_2 = self.cs
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| arjunsesh/lrr-neurips | CDM | false | 6,246 | [
"MIT"
] | 1 | d78106daec1e729b02a0452f74a37bf004ed243c | https://github.com/arjunsesh/lrr-neurips/tree/d78106daec1e729b02a0452f74a37bf004ed243c | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
"""
    Implementation of the CDM choice model as a PyTorch module
"""
def __init__(self, n, d):
"""
Initializes a CDM model
Args:
n- number of items in the universe
d- number of dimensions for feature and context embeddings
"""
super().__init__()
        # normal_ and an explicit dim replace deprecated calls; for 2-D inputs
        # dim=1 matches the old implicit LogSoftmax behavior.
        self.fs = nn.Parameter(torch.nn.init.normal_(torch.Tensor(n, d)))
        self.cs = nn.Parameter(torch.nn.init.normal_(torch.Tensor(d, n)))
        self.m = nn.LogSoftmax(dim=1)
self.d = d
self.n = n
def forward(self, x):
"""
computes the CDM choice probabilities P(.,S)
Args:
x- indicator vector for choice set S, i.e. a 'size(S)-hot' encoding of the
choice set, or a batch of these
"""
        u = x * torch.sum(torch.mm(self.fs, x * self.cs), dim=1) + (1 - x) * -16
p = self.m(u)
return p
def __str__(self):
return 'CDM-d=' + str(self.d)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Tanh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ch/cchtlpojrc3g5sxh2y4qb3nl7kymhqwhj52rofnxazdgo35daytf.py
# Topologically Sorted Source Nodes: [x_exp, neg, neg_x_exp, sub, add, truediv], Original ATen: [aten.exp, aten.neg, aten.sub, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# neg => neg
# neg_x_exp => exp_1
# sub => sub
# truediv => div
# x_exp => exp
# Graph fragment:
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, %exp_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
triton_poi_fused_add_div_exp_neg_sub_0 = async_compile.triton('triton_poi_fused_add_div_exp_neg_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_neg_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_neg_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp1 + tmp3
tmp6 = tmp4 / tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_exp, neg, neg_x_exp, sub, add, truediv], Original ATen: [aten.exp, aten.neg, aten.sub, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_exp_neg_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Tanh(ActivationFunction):
def forward(self, x):
x_exp, neg_x_exp = torch.exp(x), torch.exp(-x)
return (x_exp - neg_x_exp) / (x_exp + neg_x_exp)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
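# Quick sanity sketch (editorial addition): the manual formulation matches
# torch.tanh on moderate inputs; the explicit exponentials only diverge for
# very large |x|, where they overflow in float32.
def _tanh_check():
    x = torch.linspace(-5, 5, steps=11)
    assert torch.allclose(Tanh()(x), torch.tanh(x), atol=1e-6)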
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_exp_neg_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.exp(tmp0)
tmp2 = -tmp0
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp1 + tmp3
tmp6 = tmp4 / tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_exp_neg_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class TanhNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ashutoshml/lightning-tutorials | Tanh | false | 6,247 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Model(ActivationFunction):
def forward(self, x):
x_exp, neg_x_exp = torch.exp(x), torch.exp(-x)
return (x_exp - neg_x_exp) / (x_exp + neg_x_exp)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
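# Editorial stability note (hedged): in float32 the direct form overflows once
# |x| exceeds ~88, because exp(x) becomes inf and inf/inf yields nan, while
# torch.tanh saturates cleanly:
#   Model()(torch.tensor([100.0]))         # -> tensor([nan])
#   torch.tanh(torch.tensor([100.0]))      # -> tensor([1.])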
|
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hv/chvc25wy6j3j5a3u4ftzbjunfad2snt7mihkwdcp6ilzxzipobhu.py
# Topologically Sorted Source Nodes: [sub, add, truediv, mul, add_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# sub => sub
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %expand), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand_1, 0.0001), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {})
triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 0.0001
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x3), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, add, truediv, mul, add_1], Original ATen: [aten.sub, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.optim.lr_scheduler import *
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=0.0001):
super(LayerNorm, self).__init__()
self.alpha = Parameter(torch.ones(1, 1, hidden_size))
self.beta = Parameter(torch.zeros(1, 1, hidden_size))
self.eps = eps
def forward(self, x):
mu = torch.mean(x, 2, keepdim=True).expand_as(x)
sigma = torch.std(x, 2, keepdim=True).expand_as(x)
        return ((x - mu) / (sigma + self.eps) * self.alpha.expand_as(x)
            + self.beta.expand_as(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
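# Hedged comparison sketch (editorial): this variant uses the *unbiased* std
# (n-1 divisor) and adds eps to sigma rather than to the variance, so it is
# close to, but not bitwise identical with, torch.nn.LayerNorm.
def _compare_layernorm():
    x = torch.rand(2, 3, 8)
    ours = LayerNorm(hidden_size=8)(x)
    ref = torch.nn.LayerNorm(8, eps=0.0001)(x)
    print((ours - ref).abs().max())  # small but generally nonzero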
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.optim.lr_scheduler import *
from torch.nn import Parameter
from torch.nn.parameter import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 0.0001
tmp26 = tmp24 + tmp25
tmp27 = tmp10 / tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x3, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (1, 1, 4), (4, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_sub_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
def __init__(self, hidden_size, eps=0.0001):
super(LayerNormNew, self).__init__()
self.alpha = Parameter(torch.ones(1, 1, hidden_size))
self.beta = Parameter(torch.zeros(1, 1, hidden_size))
self.eps = eps
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashishbaghudana/san_mrc | LayerNorm | false | 6,248 | [
"BSD-3-Clause"
] | 1 | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | https://github.com/ashishbaghudana/san_mrc/tree/03ed7d94c735f1fe2854bb9c208385b5fde44905 | import torch
import torch.nn as nn
from torch.optim.lr_scheduler import *
from torch.nn import Parameter
from torch.nn.parameter import Parameter
class Model(nn.Module):
def __init__(self, hidden_size, eps=0.0001):
super().__init__()
self.alpha = Parameter(torch.ones(1, 1, hidden_size))
self.beta = Parameter(torch.zeros(1, 1, hidden_size))
self.eps = eps
def forward(self, x):
mu = torch.mean(x, 2, keepdim=True).expand_as(x)
sigma = torch.std(x, 2, keepdim=True).expand_as(x)
        return ((x - mu) / (sigma + self.eps) * self.alpha.expand_as(x)
            + self.beta.expand_as(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
ReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/i6/ci6cljj43xza2uiou3edjpt2n4joy5nbyni2pwb4ajs6es7s73lq.py
# Topologically Sorted Source Nodes: [gt, float_1, mul], Original ATen: [aten.gt, aten._to_copy, aten.mul]
# Source node to ATen node mapping:
# float_1 => convert_element_type
# gt => gt
# mul => mul
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %convert_element_type), kwargs = {})
triton_poi_fused__to_copy_gt_mul_0 = async_compile.triton('triton_poi_fused__to_copy_gt_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_gt_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_gt_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, float_1, mul], Original ATen: [aten.gt, aten._to_copy, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_gt_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ReLU(ActivationFunction):
def forward(self, x):
return x * (x > 0).float()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
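# Minimal check (editorial, hedged): the mask-and-multiply form agrees with
# the built-in ReLU on real-valued inputs.
def _relu_check():
    x = torch.randn(64)
    assert torch.allclose(ReLU()(x), torch.relu(x))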
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
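    # Added note: tmp2 below is the boolean mask (x > 0); casting it to fp32
    # and multiplying reproduces x * (x > 0).float() from the eager forward.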
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ReLUNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ashutoshml/lightning-tutorials | ReLU | false | 6,249 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Model(ActivationFunction):
def forward(self, x):
return x * (x > 0).float()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wj/cwjwip6hm2jawfypn24ubscseuw67ksfl44vgqwsxfv47zbzs4kt.py
# Topologically Sorted Source Nodes: [gt, exp, sub, where], Original ATen: [aten.gt, aten.exp, aten.sub, aten.where]
# Source node to ATen node mapping:
# exp => exp
# gt => gt
# sub => sub
# where => where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, 1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %sub), kwargs = {})
triton_poi_fused_exp_gt_sub_where_0 = async_compile.triton('triton_poi_fused_exp_gt_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_gt_sub_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_gt_sub_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, exp, sub, where], Original ATen: [aten.gt, aten.exp, aten.sub, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_gt_sub_where_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ELU(ActivationFunction):
def forward(self, x):
return torch.where(x > 0, x, torch.exp(x) - 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
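# Added sanity check (illustrative only; helper name is hypothetical): with the
# implicit alpha=1, this piecewise form should match F.elu.
def _check_elu_equivalence():
    import torch.nn.functional as F
    x = torch.randn(4, 4, 4, 4)
    manual = torch.where(x > 0, x, torch.exp(x) - 1)
    assert torch.allclose(manual, F.elu(x), atol=1e-6)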
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_gt_sub_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
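    # Added note: both branches are computed below; tl.where then selects x on
    # the positive side and exp(x) - 1 elsewhere, mirroring the eager torch.where.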
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tl.where(tmp2, tmp0, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_gt_sub_where_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class ELUNew(ActivationFunction):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ashutoshml/lightning-tutorials | ELU | false | 6,250 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Model(ActivationFunction):
def forward(self, x):
return torch.where(x > 0, x, torch.exp(x) - 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LeakyReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5u/c5u3ruzkxr7ze3g7gzizezvj3vbd33rolhnleqfo5hbqlxo2ke5f.py
# Topologically Sorted Source Nodes: [gt, mul, where], Original ATen: [aten.gt, aten.mul, aten.where]
# Source node to ATen node mapping:
# gt => gt
# mul => mul
# where => where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %mul), kwargs = {})
triton_poi_fused_gt_mul_where_0 = async_compile.triton('triton_poi_fused_gt_mul_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_mul_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_mul_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.1
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, mul, where], Original ATen: [aten.gt, aten.mul, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_gt_mul_where_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class LeakyReLU(ActivationFunction):
def __init__(self, alpha=0.1):
super().__init__()
self.config['alpha'] = alpha
def forward(self, x):
return torch.where(x > 0, x, self.config['alpha'] * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
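# Added check (illustrative only; assumes the default alpha=0.1 that is baked
# into the compiled kernel): the where-based form matches F.leaky_relu.
def _check_leaky_relu_equivalence():
    import torch.nn.functional as F
    x = torch.randn(4, 4, 4, 4)
    manual = torch.where(x > 0, x, 0.1 * x)
    assert torch.allclose(manual, F.leaky_relu(x, negative_slope=0.1))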
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gt_mul_where_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.1
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gt_mul_where_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class LeakyReLUNew(ActivationFunction):
def __init__(self, alpha=0.1):
super().__init__()
self.config['alpha'] = alpha
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ashutoshml/lightning-tutorials | LeakyReLU | false | 6,251 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class ActivationFunction(nn.Module):
def __init__(self):
super().__init__()
self.name = self.__class__.__name__
self.config = {'name': self.name}
class Model(ActivationFunction):
def __init__(self, alpha=0.1):
super().__init__()
self.config['alpha'] = alpha
def forward(self, x):
return torch.where(x > 0, x, self.config['alpha'] * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MultiplyLuminance | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/s3/cs33uypamoewjqlubot47mhpfjzncecngllor3y3ydtbzp2x5e2h.py
# Topologically Sorted Source Nodes: [add, mul], Original ATen: [aten.add, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %add), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mul], Original ATen: [aten.add, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(arg1_1, arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class MultiplyLuminance(torch.nn.Module):
def __init__(self):
super(MultiplyLuminance, self).__init__()
def forward(self, color, luminance):
return color * (1 + luminance)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
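# Added worked example (illustrative only; helper name is hypothetical):
# color * (1 + luminance) leaves the color unchanged at luminance 0 and
# doubles it at luminance 1.
def _demo_multiply_luminance():
    m = MultiplyLuminance()
    color = torch.full((1, 3, 2, 2), 0.5)
    assert torch.allclose(m(color, torch.zeros_like(color)), color)
    assert torch.allclose(m(color, torch.ones_like(color)), 2 * color)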
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
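    # Added note: the wrapper launches this kernel with (arg1_1, arg0_1), so
    # tmp4 computes in_ptr0 * (in_ptr1 + 1), the fused add-then-mul of forward.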
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](arg1_1, arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class MultiplyLuminanceNew(torch.nn.Module):
def __init__(self):
super(MultiplyLuminanceNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ashwinpn/Computer-Vision | MultiplyLuminance | false | 6,252 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, color, luminance):
return color * (1 + luminance)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GCNLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jw/cjwdgjmgprkeplqkeqfp62fftsyzegxwif7tauvk3csicpqty4l7.py
# Topologically Sorted Source Nodes: [num_neighbours, node_feats_2], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# node_feats_2 => div
# num_neighbours => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm, %sum_1), kwargs = {})
triton_poi_fused_div_sum_0 = async_compile.triton('triton_poi_fused_div_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [node_feats], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [node_feats_1], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf1)
del buf0
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [num_neighbours, node_feats_2], Original ATen: [aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sum_0.run(buf2, primals_1, 64, grid=grid(64), stream=stream0)
return (buf2, primals_1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GCNLayer(nn.Module):
def __init__(self, c_in, c_out):
super().__init__()
self.projection = nn.Linear(c_in, c_out)
def forward(self, node_feats, adj_matrix):
"""
Args:
node_feats: Tensor with node features of shape [batch_size, num_nodes, c_in]
adj_matrix: Batch of adjacency matrices of the graph. If there is an edge from i to j,
                        adj_matrix[b,i,j]=1 else 0. Supports directed edges via non-symmetric matrices.
                        Assumes the identity connections (self-loops) have already been added.
Shape: [batch_size, num_nodes, num_nodes]
"""
num_neighbours = adj_matrix.sum(dim=-1, keepdims=True)
node_feats = self.projection(node_feats)
node_feats = torch.bmm(adj_matrix, node_feats)
node_feats = node_feats / num_neighbours
return node_feats
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'c_in': 4, 'c_out': 4}]
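# Added usage sketch (illustrative only; shapes follow get_inputs above, and
# the self-loops the docstring assumes are added explicitly):
def _demo_gcn_layer():
    layer = GCNLayer(c_in=4, c_out=4)
    node_feats = torch.rand(4, 4, 4)
    adj_matrix = torch.eye(4).expand(4, 4, 4).clone()  # identity connections only
    out = layer(node_feats, adj_matrix)
    assert out.shape == (4, 4, 4)  # [batch_size, num_nodes, c_out]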
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_sum_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
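    # Added note: tmp1, tmp2, tmp4 and tmp6 are the four entries of adjacency
    # row x1; their running sum (tmp7) is num_neighbours, and the division in
    # tmp8 completes the mean aggregation from the eager module.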
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(in_out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(buf0, (4, 4, 4), (
16, 4, 1), 0), out=buf1)
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_div_sum_0[grid(64)](buf2, primals_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
return buf2, primals_1, reinterpret_tensor(primals_4, (16, 4), (4, 1), 0)
class GCNLayerNew(nn.Module):
def __init__(self, c_in, c_out):
super().__init__()
self.projection = nn.Linear(c_in, c_out)
def forward(self, input_0, input_1):
primals_2 = self.projection.weight
primals_3 = self.projection.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ashutoshml/lightning-tutorials | GCNLayer | false | 6,253 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, c_in, c_out):
super().__init__()
self.projection = nn.Linear(c_in, c_out)
def forward(self, node_feats, adj_matrix):
"""
Args:
node_feats: Tensor with node features of shape [batch_size, num_nodes, c_in]
adj_matrix: Batch of adjacency matrices of the graph. If there is an edge from i to j,
                        adj_matrix[b,i,j]=1 else 0. Supports directed edges via non-symmetric matrices.
                        Assumes the identity connections (self-loops) have already been added.
Shape: [batch_size, num_nodes, num_nodes]
"""
num_neighbours = adj_matrix.sum(dim=-1, keepdims=True)
node_feats = self.projection(node_feats)
node_feats = torch.bmm(adj_matrix, node_feats)
node_feats = node_feats / num_neighbours
return node_feats
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
FillUpLuminance | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jz/cjzehomfx565ov2vd77tvn6izixj4rxksoliqe4o4ulyjxbejgtw.py
# Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.rsub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul), kwargs = {})
triton_poi_fused_add_mul_rsub_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.rsub, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class FillUpLuminance(torch.nn.Module):
def __init__(self):
super(FillUpLuminance, self).__init__()
def forward(self, color, luminance):
return color + (1 - color) * luminance
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
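# Added observation (illustrative only; helper name is hypothetical):
# color + (1 - color) * luminance is a linear interpolation from the color
# toward white, i.e. torch.lerp with end=1.
def _check_fill_up_luminance():
    color = torch.rand(4, 4, 4, 4)
    luminance = torch.rand(4, 4, 4, 4)
    manual = color + (1 - color) * luminance
    expected = torch.lerp(color, torch.ones_like(color), luminance)
    assert torch.allclose(manual, expected)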
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_rsub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_0[grid(256)](arg0_1, arg1_1, buf0,
256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FillUpLuminanceNew(torch.nn.Module):
def __init__(self):
super(FillUpLuminanceNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ashwinpn/Computer-Vision | FillUpLuminance | false | 6,254 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, color, luminance):
return color + (1 - color) * luminance
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Triaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# w => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ah/cahcbdgzcypclgrmenrcgftl53kemvcm53v6yoxzwdqjyblrincb.py
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# s => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_9,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ue/cuezbfqwohbeaz3pb7qhlpx5cqgl5yrcj3nds7mhmzqk7fidklrx.py
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# s => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_12,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = (xindex // 4)
y0 = yindex % 4
y1 = (yindex // 4)
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x3) + (16*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [w], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4, 1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0), out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16), (64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1, 1), (64, 16, 4, 1, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [s], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), out=buf5)
del buf4
return (reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 1, 16), 0), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Triaffine(nn.Module):
"""
Triaffine layer for second-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y, z):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
z (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, seq_len, seq_len, seq_len]``.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
w = torch.einsum('bzk,ikj->bzij', z, self.weight)
s = torch.einsum('bxi,bzij,byj->bzxy', x, w, y)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'n_in': 4}]
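# Added equivalence sketch (illustrative only, not from the repo): the two-step
# einsum in forward() collapses to a single four-operand einsum.
def _check_triaffine_einsum():
    x, y, z = (torch.rand(4, 4, 4) for _ in range(3))
    tri = Triaffine(n_in=4)
    nn.init.normal_(tri.weight)  # the default init zeroes the weight
    s = tri(x, y, z)
    s_ref = torch.einsum('bxi,bzk,ikj,byj->bzxy', x, z, tri.weight, y)
    assert torch.allclose(s, s_ref, atol=1e-5)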
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
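    # Added note: the swapped x1/x2 offsets below read weight[x1, x2, x0] while
    # writing contiguously, permuting the (i, k, j) weight to (k, i, j) so the
    # first bmm can contract over k.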
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex % 4
x3 = xindex // 4
y0 = yindex % 4
y1 = yindex // 4
x5 = xindex
y4 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x3 + 16 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1, 1), (16, 4, 1, 1, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((1, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_2, (1, 16, 4), (64, 4,
1), 0), reinterpret_tensor(buf0, (1, 4, 16), (0, 16, 1), 0),
out=buf1)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 1, 4, 1), (64, 16, 4, 4, 1, 1),
torch.float32)
triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0)
del buf1
extern_kernels.bmm(primals_4, reinterpret_tensor(buf2, (4, 4, 16),
(64, 16, 1), 0), out=buf3)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 4, 1, 1), (64, 16, 4, 1,
1, 1), 0)
del buf2
triton_poi_fused_clone_2[grid(16, 16)](buf3, buf4, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(primals_3, reinterpret_tensor(buf4, (4, 4, 16),
(64, 16, 1), 0), out=buf5)
del buf4
return reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 4, 1, 16), 0
), reinterpret_tensor(primals_3, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_4, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_2, (1, 4, 16), (64, 1, 4), 0)
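# Hedged sketch (illustrative only, requires a CUDA device): `call` lowers the
# two einsums into clone/transpose kernels plus three bmm calls; assuming the
# inductor lowering is faithful, its output should match the eager einsum
# formulation. The helper name is hypothetical.
def _check_call_against_eager():
    w = torch.randn(4, 4, 4, device='cuda')
    x, y, z = (torch.rand(4, 4, 4, device='cuda') for _ in range(3))
    out = call([w, x, y, z])[0]
    wz = torch.einsum('bzk,ikj->bzij', z, w)
    ref = torch.einsum('bxi,bzij,byj->bzxy', x, wz, y)
    torch.testing.assert_close(out, ref)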
class TriaffineNew(nn.Module):
"""
Triaffine layer for second-order scoring.
This function has a tensor of weights :math:`W` and bias terms if needed.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, input_0, input_1, input_2):
primals_1 = self.weight
primals_2 = input_0
primals_3 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ashim95/parser | Triaffine | false | 6,255 | [
"MIT"
] | 1 | 61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | https://github.com/ashim95/parser/tree/61e9cd6bf16dcf1aa2b9d51b3a6c04ed048b3199 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Triaffine layer for second-order scoring.
    This module has a tensor of weights :math:`W` and optional bias terms.
The score :math:`s(x, y, z)` of the vector triple :math:`(x, y, z)` is computed as :math:`x^T z^T W y`.
Usually, :math:`x` and :math:`y` can be concatenated with bias terms.
References:
- Yu Zhang, Zhenghua Li and Min Zhang. 2020.
`Efficient Second-Order TreeCRF for Neural Dependency Parsing`_.
- Xinyu Wang, Jingxian Huang, and Kewei Tu. 2019.
`Second-Order Semantic Dependency Parsing with End-to-End Neural Networks`_.
Args:
n_in (int):
The size of the input feature.
bias_x (bool):
If ``True``, adds a bias term for tensor :math:`x`. Default: ``False``.
bias_y (bool):
If ``True``, adds a bias term for tensor :math:`y`. Default: ``False``.
.. _Efficient Second-Order TreeCRF for Neural Dependency Parsing:
https://www.aclweb.org/anthology/2020.acl-main.302/
.. _Second-Order Semantic Dependency Parsing with End-to-End Neural Networks:
https://www.aclweb.org/anthology/P19-1454/
"""
def __init__(self, n_in, bias_x=False, bias_y=False):
super().__init__()
self.n_in = n_in
self.bias_x = bias_x
self.bias_y = bias_y
self.weight = nn.Parameter(torch.Tensor(n_in + bias_x, n_in, n_in +
bias_y))
self.reset_parameters()
def __repr__(self):
s = f'n_in={self.n_in}'
if self.bias_x:
s += f', bias_x={self.bias_x}'
if self.bias_y:
s += f', bias_y={self.bias_y}'
return f'{self.__class__.__name__}({s})'
def reset_parameters(self):
nn.init.zeros_(self.weight)
def forward(self, x, y, z):
"""
Args:
x (torch.Tensor): ``[batch_size, seq_len, n_in]``.
y (torch.Tensor): ``[batch_size, seq_len, n_in]``.
z (torch.Tensor): ``[batch_size, seq_len, n_in]``.
Returns:
~torch.Tensor:
A scoring tensor of shape ``[batch_size, seq_len, seq_len, seq_len]``.
"""
if self.bias_x:
x = torch.cat((x, torch.ones_like(x[..., :1])), -1)
if self.bias_y:
y = torch.cat((y, torch.ones_like(y[..., :1])), -1)
w = torch.einsum('bzk,ikj->bzij', z, self.weight)
s = torch.einsum('bxi,bzij,byj->bzxy', x, w, y)
return s
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [4]
|
AvgReducePool1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jn/cjnv5uptstyk4xaisuiw5kf5lbz3m33meejxhbfbsta5ozps7ijn.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class AvgReducePool1d(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
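# Hedged sketch (not part of the original module): for a 3D input of shape
# [batch, channels, length], the full reduction above matches nn.AvgPool1d
# with `pool_size` equal to the input length. The helper name is hypothetical.
def _check_avg_reduce_pool():
    x = torch.rand(4, 4, 16)
    reduced = AvgReducePool1d()(x)
    pooled = nn.AvgPool1d(kernel_size=x.size(2))(x).squeeze(-1)
    assert torch.allclose(reduced, pooled, atol=1e-6)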
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
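# Hedged sanity check (illustrative only, requires a CUDA device): the fused
# kernel sums the four length-4 slices along dim 2 and divides by 4.0, so
# `call` should reproduce torch.mean(x, dim=2) for the fixed input shape.
# The helper name is hypothetical.
def _check_triton_mean():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out, = call([x])
    torch.testing.assert_close(out, torch.mean(x, dim=2))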
class AvgReducePool1dNew(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| atif93/texar-pytorch | AvgReducePool1d | false | 6,256 | [
"Apache-2.0"
] | 1 | 88163619ec69382e1bbe57fa8bce06260bfc76a2 | https://github.com/atif93/texar-pytorch/tree/88163619ec69382e1bbe57fa8bce06260bfc76a2 | import torch
from torch import nn
class Model(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CoSirenModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ds/cds2hiu4hiviixznddlgwpdudomtggtrh7moz4blfltypgg6ex3j.py
# Topologically Sorted Source Nodes: [cat, sub], Original ATen: [aten.cat, aten.sub]
# Source node to ATen node mapping:
# cat => cat
# sub => sub
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], -1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cat, 0.7853981633974483), kwargs = {})
triton_poi_fused_cat_sub_0 = async_compile.triton('triton_poi_fused_cat_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tmp10 = tl.full([1], 4, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.load(in_ptr0 + ((2*x1) + ((-2) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tmp17 = 0.7853981633974483
tmp18 = tmp16 - tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat, sub], Original ATen: [aten.cat, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_sub_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn
class CoSirenModule(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(CoSirenModule, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
init_bounds = math.sqrt(24 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, x):
x = self.linear(x)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1) - math.pi / 4
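# Hedged illustration (not part of the original module): the linear layer
# maps in_features -> out_features // 2, and concatenating sin and cos
# restores out_features, with every output shifted into
# [-1 - pi/4, 1 - pi/4]. The helper name is hypothetical.
def _check_cosiren():
    m = CoSirenModule(in_features=4, out_features=4)
    out = m(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)
    assert out.max().item() <= 1 - math.pi / 4 + 1e-6
    assert out.min().item() >= -1 - math.pi / 4 - 1e-6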
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tmp17 = 0.7853981633974483
tmp18 = tmp16 - tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
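# Note on the fused kernel above: lanes with x0 < 2 read column x0 of the
# linear output and apply sin, lanes with x0 >= 2 read column x0 - 2 and
# apply cos, and 0.7853981633974483 (= pi / 4) is subtracted in the same
# pass, fusing torch.cat, sin/cos and the shift into one kernel launch.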
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_sub_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class CoSirenModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(CoSirenModuleNew, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
init_bounds = math.sqrt(24 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashwinpn/Computer-Vision | CoSirenModule | false | 6,257 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import math
import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
init_bounds = math.sqrt(24 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, x):
x = self.linear(x)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1) - math.pi / 4
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PotCoSirenModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qe/cqel7mioyye7q3y73sso7e6bwxvlviuqkgwuel2tba3dre2atvnk.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tmp10 = tl.full([1], 4, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.load(in_ptr0 + ((2*x1) + ((-2) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class PotCoSirenModule(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(PotCoSirenModule, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b=
weight_multiplier)
self.linear.weight.data = 2 ** self.linear.weight.data
def forward(self, x):
x = self.linear(x)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
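# Hedged illustration (not part of the original module): after `__init__`,
# every weight equals 2 ** u with u ~ Uniform(-weight_multiplier,
# weight_multiplier), i.e. a log-uniform positive frequency in
# [2**-m, 2**m]. The helper name is hypothetical.
def _check_potcosiren_weights(multiplier=3.0):
    mod = PotCoSirenModule(4, 4, weight_multiplier=multiplier)
    w = mod.linear.weight.data
    assert (w > 0).all()
    assert w.min() >= 2 ** -multiplier and w.max() <= 2 ** multiplier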
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp12 = tl.load(in_ptr0 + (2 * x1 + (-2 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class PotCoSirenModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(PotCoSirenModuleNew, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b=
weight_multiplier)
self.linear.weight.data = 2 ** self.linear.weight.data
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashwinpn/Computer-Vision | PotCoSirenModule | false | 6,258 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features // 2)
torch.nn.init.uniform_(self.linear.weight, a=-weight_multiplier, b=
weight_multiplier)
self.linear.weight.data = 2 ** self.linear.weight.data
def forward(self, x):
x = self.linear(x)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Autoencoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/36/c36zo3vuurwlz2vsilrwbwcrp4wjjakmhyv4hqisrsvpbkxge5p5.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ws/cwsax66rhsevxcmn5xbeuksoag7q56reoaydkjeyg7zazfngkbdy.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_offsets_to_indices, _low_memory_max_pool2d_with_offsets, getitem
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu, [4, 4], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %_low_memory_max_pool2d_offsets_to_indices : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_1, 4, 64, [1, 1], [0, 0]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 89304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 61
x1 = (xindex // 61) % 61
x2 = (xindex // 3721)
x3 = xindex % 3721
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1) + (4096*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (64*x1) + (4096*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (64*x1) + (4096*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (3 + x0 + (64*x1) + (4096*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x0 + (64*x1) + (4096*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (65 + x0 + (64*x1) + (4096*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (66 + x0 + (64*x1) + (4096*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (67 + x0 + (64*x1) + (4096*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0 + (64*x1) + (4096*x2)), xmask)
tmp17 = tl.load(in_ptr0 + (129 + x0 + (64*x1) + (4096*x2)), xmask)
tmp19 = tl.load(in_ptr0 + (130 + x0 + (64*x1) + (4096*x2)), xmask)
tmp21 = tl.load(in_ptr0 + (131 + x0 + (64*x1) + (4096*x2)), xmask)
tmp23 = tl.load(in_ptr0 + (192 + x0 + (64*x1) + (4096*x2)), xmask)
tmp25 = tl.load(in_ptr0 + (193 + x0 + (64*x1) + (4096*x2)), xmask)
tmp27 = tl.load(in_ptr0 + (194 + x0 + (64*x1) + (4096*x2)), xmask)
tmp29 = tl.load(in_ptr0 + (195 + x0 + (64*x1) + (4096*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1], 1, tl.int8)
tmp33 = tl.full([1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.full([1], 4, tl.int32)
tmp78 = tl.where((tmp76 < 0) != (tmp77 < 0), tl.where(tmp76 % tmp77 != 0, tmp76 // tmp77 - 1, tmp76 // tmp77), tmp76 // tmp77)
tmp79 = tmp78 * tmp77
tmp80 = tmp76 - tmp79
tmp81 = x1
tmp82 = tmp81 + tmp78
tmp83 = x0
tmp84 = tmp83 + tmp80
tmp85 = tl.full([1], 64, tl.int64)
tmp86 = tmp82 * tmp85
tmp87 = tmp86 + tmp84
tl.store(out_ptr0 + (x3 + (3744*x2)), tmp30, xmask)
tl.store(out_ptr2 + (x4), tmp87, xmask)
''', device_str='cuda')
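# Note on the tail of the kernel above (tmp76..tmp87): the argmax offset in
# the 4x4 pooling window is decoded as (row, col) = (offset // 4, offset % 4)
# via a floor-division emulation, then turned into a flat index into the
# 64-wide input plane as (x1 + row) * 64 + (x0 + col); MaxUnpool2d consumes
# these flat indices later in the graph.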
# kernel path: runs/run_shard_4/inductor_cache/pd/cpd5pehoftbarvz3ub724yds53aewxwb2mmv2bg7lfmih66qkdwi.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3721) % 16
x0 = xindex % 3721
x4 = (xindex // 3721)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3744*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qq/cqqxvitrybyfuwfcfybcsvehkdckwzucobzhcpmw3en2cctbjhou.py
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_offsets_to_indices_1, _low_memory_max_pool2d_with_offsets_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [1, 1], [0, 0], [1, 1], False), kwargs = {})
# %_low_memory_max_pool2d_offsets_to_indices_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_3, 2, 61, [1, 1], [0, 0]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 60
x1 = (xindex // 60) % 60
x2 = (xindex // 3600)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (61*x1) + (3744*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (61*x1) + (3744*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (61 + x0 + (61*x1) + (3744*x2)), xmask)
tmp12 = tl.load(in_ptr0 + (62 + x0 + (61*x1) + (3744*x2)), xmask)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 2, tl.int32)
tmp18 = tl.where((tmp15 < 0) != (tmp17 < 0), tl.where(tmp15 % tmp17 != 0, tmp15 // tmp17 - 1, tmp15 // tmp17), tmp15 // tmp17)
tmp19 = tmp18 * tmp17
tmp20 = tmp15 - tmp19
tmp21 = x1
tmp22 = tmp21 + tmp18
tmp23 = x0
tmp24 = tmp23 + tmp20
tmp25 = tl.full([1], 61, tl.int64)
tmp26 = tmp22 * tmp25
tmp27 = tmp26 + tmp24
tl.store(out_ptr0 + (x3), tmp27, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6b/c6bha3egneow7eyxkfbjo447hqoozs3dafjrwsn4lvlwazlneymm.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_7 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%max_unpool2d,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hs/chsb7mkhhxxkwf7y6hsg3tdfkuq3dzjpttntloehuopyuyssb25p.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_8 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 89304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3721) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ra/craec5gqxwtnaszxa2sv6dhtcntjs6zsekqcy2qnqc2g3pql2j6k.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_10 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%max_unpool2d_1,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ng/cnglk7iqo3gr37e4n5s6qhnpvula3xtbud533gauvkttxcgmkwnl.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_11 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_8, %primals_9, [1, 1], [2, 2], [1, 1], True, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_7 = async_compile.triton('triton_poi_fused_convolution_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_7, (6, ), (1, ))
assert_size_stride(primals_8, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_9, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 64, 64), (24576, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 98304, grid=grid(98304), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 6, 61, 61), (22464, 3744, 61, 1), torch.float32)
buf4 = empty_strided_cuda((4, 6, 61, 61), (22326, 3721, 61, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf4, 89304, grid=grid(89304), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 61, 61), (59536, 3721, 61, 1))
buf6 = empty_strided_cuda((4, 16, 61, 61), (59904, 3744, 61, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, buf6, 238144, grid=grid(238144), stream=stream0)
del buf5
del primals_5
buf7 = empty_strided_cuda((4, 16, 60, 60), (57600, 3600, 60, 1), torch.int64)
buf8 = empty_strided_cuda((4, 16, 60, 60), (57600, 3600, 60, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf6, buf7, buf8, 230400, grid=grid(230400), stream=stream0)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_6], Original ATen: [aten.max_pool2d_with_indices, aten.max_unpool2d]
buf9 = torch.ops.aten.max_unpool2d.default(buf8, buf7, [61, 61])
del buf8
buf10 = buf9
del buf9
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf11, 238144, grid=grid(238144), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 6, 61, 61), (22326, 3721, 61, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf13, primals_7, 89304, grid=grid(89304), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.max_unpool2d]
buf14 = torch.ops.aten.max_unpool2d.default(buf13, buf4, [64, 64])
del buf13
buf15 = buf14
del buf14
buf16 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf16, 98304, grid=grid(98304), stream=stream0)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.convolution]
triton_poi_fused_convolution_7.run(buf18, primals_9, 49152, grid=grid(49152), stream=stream0)
del primals_9
return (buf18, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf2, buf4, buf6, buf7, buf11, buf16, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
self.conv1 = nn.Conv2d(3, 6, padding=2, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(4, stride=1, return_indices=True)
self.conv2 = nn.Conv2d(6, 16, padding=2, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(2, stride=1, return_indices=True)
self.unpool2 = nn.MaxUnpool2d(2, stride=1)
self.unconv2 = nn.ConvTranspose2d(16, 6, padding=2, kernel_size=5)
self.unpool1 = nn.MaxUnpool2d(4, stride=1)
self.unconv1 = nn.ConvTranspose2d(6, 3, padding=2, kernel_size=5)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
x = self.unpool2(x, indices2)
x = F.relu(x)
x = self.unconv2(x)
x = self.unpool1(x, indices1)
x = F.relu(x)
x = self.unconv1(x)
return x
def encoder(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
return x, indices1, indices2
    def decoder(self, x, indices1, indices2):
        # Inverse of encoder(): unpool with the saved indices, then
        # deconvolve, mirroring the decode half of forward().
        x = self.unpool2(x, indices2)
        x = F.relu(x)
        x = self.unconv2(x)
        x = self.unpool1(x, indices1)
        x = F.relu(x)
        x = self.unconv1(x)
        return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
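# Shape arithmetic behind the sizes in the compiled code (a hedged sketch;
# `pool_out` is a helper introduced here purely for illustration):
# MaxPool2d(kernel_size=k, stride=1) with no padding maps H -> H - k + 1,
# so 64 -> 61 for the k=4 pool and 61 -> 60 for the k=2 pool, which is where
# buffer sizes such as 4*6*61*61 = 89304 and 4*16*60*60 = 230400 come from.
def pool_out(h, k, stride=1):
    """Output height/width of an unpadded MaxPool2d."""
    return (h - k) // stride + 1
assert pool_out(64, 4) == 61 and pool_out(61, 2) == 60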
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 6
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 89304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 61
x1 = xindex // 61 % 61
x2 = xindex // 3721
x3 = xindex % 3721
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1 + 4096 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (3 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (64 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (65 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (66 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (67 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (128 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp17 = tl.load(in_ptr0 + (129 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp19 = tl.load(in_ptr0 + (130 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp21 = tl.load(in_ptr0 + (131 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp23 = tl.load(in_ptr0 + (192 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp25 = tl.load(in_ptr0 + (193 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp27 = tl.load(in_ptr0 + (194 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp29 = tl.load(in_ptr0 + (195 + x0 + 64 * x1 + 4096 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = triton_helpers.maximum(tmp19, tmp18)
tmp22 = triton_helpers.maximum(tmp21, tmp20)
tmp24 = triton_helpers.maximum(tmp23, tmp22)
tmp26 = triton_helpers.maximum(tmp25, tmp24)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tmp31 = tmp1 > tmp0
tmp32 = tl.full([1], 1, tl.int8)
tmp33 = tl.full([1], 0, tl.int8)
tmp34 = tl.where(tmp31, tmp32, tmp33)
tmp35 = tmp3 > tmp2
tmp36 = tl.full([1], 2, tl.int8)
tmp37 = tl.where(tmp35, tmp36, tmp34)
tmp38 = tmp5 > tmp4
tmp39 = tl.full([1], 3, tl.int8)
tmp40 = tl.where(tmp38, tmp39, tmp37)
tmp41 = tmp7 > tmp6
tmp42 = tl.full([1], 4, tl.int8)
tmp43 = tl.where(tmp41, tmp42, tmp40)
tmp44 = tmp9 > tmp8
tmp45 = tl.full([1], 5, tl.int8)
tmp46 = tl.where(tmp44, tmp45, tmp43)
tmp47 = tmp11 > tmp10
tmp48 = tl.full([1], 6, tl.int8)
tmp49 = tl.where(tmp47, tmp48, tmp46)
tmp50 = tmp13 > tmp12
tmp51 = tl.full([1], 7, tl.int8)
tmp52 = tl.where(tmp50, tmp51, tmp49)
tmp53 = tmp15 > tmp14
tmp54 = tl.full([1], 8, tl.int8)
tmp55 = tl.where(tmp53, tmp54, tmp52)
tmp56 = tmp17 > tmp16
tmp57 = tl.full([1], 9, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp19 > tmp18
tmp60 = tl.full([1], 10, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp21 > tmp20
tmp63 = tl.full([1], 11, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp23 > tmp22
tmp66 = tl.full([1], 12, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp25 > tmp24
tmp69 = tl.full([1], 13, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp27 > tmp26
tmp72 = tl.full([1], 14, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp29 > tmp28
tmp75 = tl.full([1], 15, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tmp77 = tl.full([1], 4, tl.int32)
tmp78 = tl.where((tmp76 < 0) != (tmp77 < 0), tl.where(tmp76 % tmp77 !=
0, tmp76 // tmp77 - 1, tmp76 // tmp77), tmp76 // tmp77)
tmp79 = tmp78 * tmp77
tmp80 = tmp76 - tmp79
tmp81 = x1
tmp82 = tmp81 + tmp78
tmp83 = x0
tmp84 = tmp83 + tmp80
tmp85 = tl.full([1], 64, tl.int64)
tmp86 = tmp82 * tmp85
tmp87 = tmp86 + tmp84
tl.store(out_ptr0 + (x3 + 3744 * x2), tmp30, xmask)
tl.store(out_ptr2 + x4, tmp87, xmask)
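# Note on the index math above: `tmp76` is the argmax position k (0..15)
# inside the 4x4 pooling window, accumulated through the chained `tl.where`
# updates. The guarded divide/modulo recovers the window-local row k // 4 and
# column k % 4, and `tmp87 = (x1 + k // 4) * 64 + (x0 + k % 4)` converts that
# into a flat offset into the 64x64 input plane. That per-plane flat layout is
# what torch.ops.aten.max_unpool2d consumes later in the `call` function.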
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 16
x0 = xindex % 3721
x4 = xindex // 3721
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3744 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 60
x1 = xindex // 60 % 60
x2 = xindex // 3600
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 61 * x1 + 3744 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 61 * x1 + 3744 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (61 + x0 + 61 * x1 + 3744 * x2), xmask)
tmp12 = tl.load(in_ptr0 + (62 + x0 + 61 * x1 + 3744 * x2), xmask)
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 2, tl.int32)
tmp18 = tl.where((tmp15 < 0) != (tmp17 < 0), tl.where(tmp15 % tmp17 !=
0, tmp15 // tmp17 - 1, tmp15 // tmp17), tmp15 // tmp17)
tmp19 = tmp18 * tmp17
tmp20 = tmp15 - tmp19
tmp21 = x1
tmp22 = tmp21 + tmp18
tmp23 = x0
tmp24 = tmp23 + tmp20
tmp25 = tl.full([1], 61, tl.int64)
tmp26 = tmp22 * tmp25
tmp27 = tmp26 + tmp24
tl.store(out_ptr0 + x3, tmp27, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 238144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 89304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3721 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_7, (6,), (1,))
assert_size_stride(primals_8, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_9, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 64, 64), (24576, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(98304)](buf1, primals_2,
98304, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 61, 61), (22464, 3744, 61, 1),
torch.float32)
buf4 = empty_strided_cuda((4, 6, 61, 61), (22326, 3721, 61, 1),
torch.int64)
triton_poi_fused_max_pool2d_with_indices_1[grid(89304)](buf1, buf2,
buf4, 89304, XBLOCK=512, num_warps=8, num_stages=1)
buf5 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 16, 61, 61), (59536, 3721, 61, 1))
buf6 = empty_strided_cuda((4, 16, 61, 61), (59904, 3744, 61, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(238144)](buf5, primals_5,
buf6, 238144, XBLOCK=512, num_warps=8, num_stages=1)
del buf5
del primals_5
buf7 = empty_strided_cuda((4, 16, 60, 60), (57600, 3600, 60, 1),
torch.int64)
buf8 = empty_strided_cuda((4, 16, 60, 60), (57600, 3600, 60, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(230400)](buf6, buf7,
buf8, 230400, XBLOCK=512, num_warps=8, num_stages=1)
buf9 = torch.ops.aten.max_unpool2d.default(buf8, buf7, [61, 61])
del buf8
buf10 = buf9
del buf9
buf11 = buf10
del buf10
triton_poi_fused_relu_4[grid(238144)](buf11, 238144, XBLOCK=1024,
num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 6, 61, 61), (22326, 3721, 61, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_5[grid(89304)](buf13, primals_7, 89304,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf14 = torch.ops.aten.max_unpool2d.default(buf13, buf4, [64, 64])
del buf13
buf15 = buf14
del buf14
buf16 = buf15
del buf15
triton_poi_fused_relu_6[grid(98304)](buf16, 98304, XBLOCK=1024,
num_warps=4, num_stages=1)
buf17 = extern_kernels.convolution(buf16, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf18 = buf17
del buf17
triton_poi_fused_convolution_7[grid(49152)](buf18, primals_9, 49152,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_9
return (buf18, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf2, buf4, buf6, buf7, buf11, buf16)
class AutoencoderNew(nn.Module):
def __init__(self):
super(AutoencoderNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, padding=2, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(4, stride=1, return_indices=True)
self.conv2 = nn.Conv2d(6, 16, padding=2, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(2, stride=1, return_indices=True)
self.unpool2 = nn.MaxUnpool2d(2, stride=1)
self.unconv2 = nn.ConvTranspose2d(16, 6, padding=2, kernel_size=5)
self.unpool1 = nn.MaxUnpool2d(4, stride=1)
self.unconv1 = nn.ConvTranspose2d(6, 3, padding=2, kernel_size=5)
def encoder(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
return x, indices1, indices2
    def decoder(self, x, indices1, indices2):
        # Inverse of encoder(): unpool with the saved indices, then
        # deconvolve.
        x = self.unpool2(x, indices2)
        x = F.relu(x)
        x = self.unconv2(x)
        x = self.unpool1(x, indices1)
        x = F.relu(x)
        x = self.unconv1(x)
        return x
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.unconv2.weight
primals_7 = self.unconv2.bias
primals_8 = self.unconv1.weight
primals_9 = self.unconv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| aoxolotl/slr | Autoencoder | false | 6,259 | [
"MIT"
] | 1 | 20a4a9036f2dc3a61745072f89b0f5bb1cc51e1b | https://github.com/aoxolotl/slr/tree/20a4a9036f2dc3a61745072f89b0f5bb1cc51e1b | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, padding=2, kernel_size=5)
self.maxpool1 = nn.MaxPool2d(4, stride=1, return_indices=True)
self.conv2 = nn.Conv2d(6, 16, padding=2, kernel_size=5)
self.maxpool2 = nn.MaxPool2d(2, stride=1, return_indices=True)
self.unpool2 = nn.MaxUnpool2d(2, stride=1)
self.unconv2 = nn.ConvTranspose2d(16, 6, padding=2, kernel_size=5)
self.unpool1 = nn.MaxUnpool2d(4, stride=1)
self.unconv1 = nn.ConvTranspose2d(6, 3, padding=2, kernel_size=5)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
x = self.unpool2(x, indices2)
x = F.relu(x)
x = self.unconv2(x)
x = self.unpool1(x, indices1)
x = F.relu(x)
x = self.unconv1(x)
return x
def encoder(self, x):
x = self.conv1(x)
x = F.relu(x)
x, indices1 = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x, indices2 = self.maxpool2(x)
return x, indices1, indices2
    def decoder(self, x, indices1, indices2):
        # Inverse of encoder(): unpool with the saved indices, then
        # deconvolve, mirroring the decode half of forward().
        x = self.unpool2(x, indices2)
        x = F.relu(x)
        x = self.unconv2(x)
        x = self.unpool1(x, indices1)
        x = F.relu(x)
        x = self.unconv1(x)
        return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
Embbed2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/j4/cj42y3ycik4bz72xcuz6dg2n7ynzf3mjr6cbopfd37chfwo2atfp.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl_math.sin(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + ((-4) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.cos(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_3, buf0, buf1, 512, grid=grid(512), stream=stream0)
return (buf1, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class Embbed2(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(Embbed2, self).__init__()
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def forward(self, x):
x = torch.matmul(x, self.b.T)
return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1
)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
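# The module is a Fourier-feature embedding: for x of shape (..., in_features)
# it computes z = x @ B.T and returns cat([a * sin(z), a * cos(z)], dim=-1),
# doubling the feature width (hence output_size() == 2 * out_features). A
# hedged eager-mode sketch; `fourier_embed` is a name introduced here only
# for illustration:
def fourier_embed(x, B, a):
    z = torch.matmul(x, B.T)
    return torch.cat([a * torch.sin(z), a * torch.cos(z)], dim=-1)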
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.sin(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp14 = tl.load(in_ptr0 + (-4 + x0), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp11 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.cos(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, buf0, buf1, 512,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0
class Embbed2New(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(Embbed2New, self).__init__()
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def output_size(self):
return 2 * self.osize
def forward(self, input_0):
primals_1 = self.b
primals_3 = self.a
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashwinpn/Computer-Vision | Embbed2 | false | 6,260 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super().__init__()
self.b = 2.0 ** torch.linspace(0, weight_multiplier, out_features //
in_features) - 1
self.b = torch.nn.Parameter(torch.reshape(torch.eye(in_features) *
self.b[:, None, None], [out_features, in_features]))
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def forward(self, x):
x = torch.matmul(x, self.b.T)
return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1
)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SkipModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %view_3], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x1_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_3, buf1, buf2, 512, grid=grid(512), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0), alpha=1, beta=1, out=buf3)
del primals_7
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf2, (64, 8), (8, 1), 0), primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class SkipModule(torch.nn.Module):
def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
super(SkipModule, self).__init__()
        # nn.Linear's third positional argument is `bias`, so an activation
        # module cannot be passed there; these are plain affine layers and
        # `activation` is unused in this module.
        self.linear1 = torch.nn.Linear(in_features, out_features)
        self.linear2 = torch.nn.Linear(out_features, out_features)
        self.linear3 = torch.nn.Linear(in_features + out_features,
            out_features)
def forward(self, x):
x1 = self.linear1(x)
x1 = self.linear2(x1)
x = torch.cat((x, x1), dim=-1)
return self.linear3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
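# The skip connection concatenates the raw input with the two-layer branch on
# the last dim, which is why linear3 consumes in_features + out_features
# inputs (the (4, 8) weight, primals_6, in the compiled call). A hedged sketch
# of the data flow; `skip_forward` is a helper introduced here only for
# illustration:
def skip_forward(x, l1, l2, l3):
    return l3(torch.cat((x, l2(l1(x))), dim=-1))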
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 8), (8, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, buf1, buf2, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.addmm(primals_7, reinterpret_tensor(buf2, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_6, (8, 4), (1, 8), 0),
alpha=1, beta=1, out=buf3)
del primals_7
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf2, (64, 8), (8, 1), 0
), primals_6, primals_4
class SkipModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
super(SkipModuleNew, self).__init__()
        # nn.Linear's third positional argument is `bias`; `activation` is
        # unused in this module.
        self.linear1 = torch.nn.Linear(in_features, out_features)
        self.linear2 = torch.nn.Linear(out_features, out_features)
        self.linear3 = torch.nn.Linear(in_features + out_features,
            out_features)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ashwinpn/Computer-Vision | SkipModule | false | 6,261 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, activation=torch.nn.ReLU()):
super().__init__()
        # nn.Linear's third positional argument is `bias`; `activation` is
        # unused in this module.
        self.linear1 = torch.nn.Linear(in_features, out_features)
        self.linear2 = torch.nn.Linear(out_features, out_features)
        self.linear3 = torch.nn.Linear(in_features + out_features,
            out_features)
def forward(self, x):
x1 = self.linear1(x)
x1 = self.linear2(x1)
x = torch.cat((x, x1), dim=-1)
return self.linear3(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
BarlowTwinsLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ts/ctstfkrc7paolmsqy6y4oodrx7y7zl43vevbfxmce3etmgnfetoi.py
# Topologically Sorted Source Nodes: [mean, sub, std, z1_norm], Original ATen: [aten.mean, aten.sub, aten.std, aten.div]
# Source node to ATen node mapping:
# mean => mean
# std => sqrt, var
# sub => sub
# z1_norm => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%arg0_1, [0]), kwargs = {correction: 1.0})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_poi_fused_div_mean_std_sub_0 = async_compile.triton('triton_poi_fused_div_mean_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mean_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tmp10 / tmp24
tl.store(out_ptr0 + (x2), tmp25, xmask)
''', device_str='cuda')
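# The kernel above standardizes each feature column of the (4, 4) embedding
# over the batch dimension: it subtracts the per-column mean and divides by
# the Bessel-corrected standard deviation (sum of squared deviations / 3 for
# a batch of 4, matching torch.std's default unbiased estimator, i.e. the
# `correction: 1.0` in the graph fragment). A hedged one-line eager
# equivalent would be `(z - z.mean(0)) / z.std(0)`.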
# kernel path: runs/run_shard_4/inductor_cache/2b/c2bijcimj2gqjrx4memimto7u2q4w3labsdqg2pqksqyyxv6rdzt.py
# Topologically Sorted Source Nodes: [cross_corr], Original ATen: [aten.div]
# Source node to ATen node mapping:
# cross_corr => div_2
# Graph fragment:
# %div_2 : [num_users=6] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, 4), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aq/caq2brq6igxa5rh44oq7gy67c57eo54qi7diazcnqyoudm3w5moo.py
# Topologically Sorted Source Nodes: [add_], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_ => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%diagonal, -1), kwargs = {})
# %copy__default : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%diagonal_default, %add), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (5*x0), xmask, eviction_policy='evict_last')
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (5*x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wq/cwq4twtwxcsuyzn7bd2msejq5qdky2webkn3ednjowusbxy3yrcn.py
# Topologically Sorted Source Nodes: [pow_, on_diag], Original ATen: [aten.pow, aten.sum]
# Source node to ATen node mapping:
# on_diag => sum_1
# pow_ => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%diagonal_1, 2), kwargs = {})
# %copy__default_1 : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%diagonal_default_1, %pow_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%diagonal_2,), kwargs = {})
triton_per_fused_pow_sum_3 = async_compile.triton('triton_per_fused_pow_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_pow_sum_3', 'mutated_arg_names': ['in_ptr0', 'out_ptr1'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_pow_sum_3(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (5*r0), None, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr1 + (tl.broadcast_to(5*r0, [XBLOCK, RBLOCK])), tmp1, None)
tl.store(out_ptr2 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/d2/cd2zqqhktmkamh5rxsdhx54vbx6azq47tux4tor3hdkrzfmnkj4l.py
# Topologically Sorted Source Nodes: [off_diag, mul, add], Original ATen: [aten.sum, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add_1
# mul => mul
# off_diag => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_6,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 0.005), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul), kwargs = {})
triton_per_fused_add_mul_sum_4 = async_compile.triton('triton_per_fused_add_mul_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_sum_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + (5*(r0 // 4)) + (5*((r0 % 4) // 4)) + (r0 % 4)), rmask, other=0.0)
tmp6 = tl.load(in_out_ptr0 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp8 = 0.005
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, std, z1_norm], Original ATen: [aten.mean, aten.sub, aten.std, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mean_std_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_1, sub_1, std_1, z2_norm], Original ATen: [aten.mean, aten.sub, aten.std, aten.div]
triton_poi_fused_div_mean_std_sub_0.run(arg1_1, buf1, 16, grid=grid(16), stream=stream0)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_1, sub_1, std_1, z2_norm, matmul], Original ATen: [aten.mean, aten.sub, aten.std, aten.div, aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), buf1, out=buf2)
del buf0
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [cross_corr], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf2, buf3, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [add_], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf2, buf3, 4, grid=grid(4), stream=stream0)
del buf2
buf7 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [pow_, on_diag], Original ATen: [aten.pow, aten.sum]
triton_per_fused_pow_sum_3.run(buf3, buf3, buf7, 1, 4, grid=grid(1), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [off_diag, mul, add], Original ATen: [aten.sum, aten.mul, aten.add]
triton_per_fused_add_mul_sum_4.run(buf9, buf3, 1, 12, grid=grid(1), stream=stream0)
del buf3
return (buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BarlowTwinsLoss(nn.Module):
def __init__(self, batch_size, lambda_coeff=0.005, z_dim=128):
super().__init__()
self.z_dim = z_dim
self.batch_size = batch_size
self.lambda_coeff = lambda_coeff
def off_diagonal_ele(self, x):
n, m = x.shape
assert n == m
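        # flatten the n*n matrix, drop the last element and view as
        # (n - 1, n + 1): every row then begins with a diagonal entry, so
        # slicing off column 0 leaves exactly the off-diagonal elements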
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, z1, z2):
z1_norm = (z1 - torch.mean(z1, dim=0)) / torch.std(z1, dim=0)
z2_norm = (z2 - torch.mean(z2, dim=0)) / torch.std(z2, dim=0)
cross_corr = torch.matmul(z1_norm.T, z2_norm) / self.batch_size
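        # the loss pushes cross_corr toward the identity: diagonal -> 1
        # (invariance term) and off-diagonal -> 0 (redundancy reduction,
        # weighted by lambda_coeff)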
on_diag = torch.diagonal(cross_corr).add_(-1).pow_(2).sum()
off_diag = self.off_diagonal_ele(cross_corr).pow_(2).sum()
return on_diag + self.lambda_coeff * off_diag
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'batch_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_mean_std_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
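    # column-wise z-normalization fused into one pass: x0 selects a column,
    # the four stride-4 loads below fetch its rows, and the mean and
    # Bessel-corrected std (squared deviations / 3.0) are rebuilt per
    # element before computing (x - mean) / std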
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = tmp10 / tmp24
tl.store(out_ptr0 + x2, tmp25, xmask)
@triton.jit
def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
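    # 0.25 == 1 / batch_size: scales the z1_norm.T @ z2_norm product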
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 5 * x0, xmask, eviction_policy='evict_last')
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = -1.0
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + 5 * x0, tmp4, xmask)
@triton.jit
def triton_per_fused_pow_sum_3(in_ptr0, out_ptr1, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
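    # stride n + 1 == 5 walks the main diagonal of the row-major 4x4 matrix;
    # the squared entries are written back in place and summed into on_diag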
tmp0 = tl.load(in_ptr0 + 5 * r0, None, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tl.store(out_ptr1 + tl.broadcast_to(5 * r0, [XBLOCK, RBLOCK]), tmp1, None)
tl.store(out_ptr2 + tl.full([XBLOCK, 1], 0, tl.int32), tmp4, None)
@triton.jit
def triton_per_fused_add_mul_sum_4(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
rnumel = 12
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
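    # flat indices of the 12 off-diagonal elements: 1 + 5 * (r0 // 4) + r0 % 4
    # (the 5 * (r0 % 4 // 4) term is always zero), matching
    # flatten()[:-1].view(3, 5)[:, 1:] from off_diagonal_ele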
tmp0 = tl.load(in_ptr0 + (1 + 5 * (r0 // 4) + 5 * (r0 % 4 // 4) + r0 %
4), rmask, other=0.0)
tmp6 = tl.load(in_out_ptr0 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, 1])
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp8 = 0.005
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
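        # buf0/buf1: column-normalized z1/z2; buf2: z1_norm.T @ z2_norm;
        # buf3: cross_corr with its diagonal shifted by -1; buf7: on_diag;
        # buf9: on_diag + lambda_coeff * off_diag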
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mean_std_sub_0[grid(16)](arg0_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_div_mean_std_sub_0[grid(16)](arg1_1, buf1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (4, 4), (1, 4), 0), buf1,
out=buf2)
del buf0
buf3 = buf1
del buf1
triton_poi_fused_div_1[grid(16)](buf2, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
triton_poi_fused_add_2[grid(4)](buf2, buf3, 4, XBLOCK=4, num_warps=
1, num_stages=1)
del buf2
buf7 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_pow_sum_3[grid(1)](buf3, buf3, buf7, 1, 4, XBLOCK=
1, num_warps=2, num_stages=1)
buf9 = buf7
del buf7
triton_per_fused_add_mul_sum_4[grid(1)](buf9, buf3, 1, 12, XBLOCK=1,
num_warps=2, num_stages=1)
del buf3
return buf9,
class BarlowTwinsLossNew(nn.Module):
def __init__(self, batch_size, lambda_coeff=0.005, z_dim=128):
super().__init__()
self.z_dim = z_dim
self.batch_size = batch_size
self.lambda_coeff = lambda_coeff
def off_diagonal_ele(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ashutoshml/lightning-tutorials | BarlowTwinsLoss | false | 6,262 | [
"Apache-2.0"
] | 1 | 898b8b6f9852c0b80f034a3187bc1cd34dd521ce | https://github.com/ashutoshml/lightning-tutorials/tree/898b8b6f9852c0b80f034a3187bc1cd34dd521ce | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, batch_size, lambda_coeff=0.005, z_dim=128):
super().__init__()
self.z_dim = z_dim
self.batch_size = batch_size
self.lambda_coeff = lambda_coeff
def off_diagonal_ele(self, x):
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
def forward(self, z1, z2):
z1_norm = (z1 - torch.mean(z1, dim=0)) / torch.std(z1, dim=0)
z2_norm = (z2 - torch.mean(z2, dim=0)) / torch.std(z2, dim=0)
cross_corr = torch.matmul(z1_norm.T, z2_norm) / self.batch_size
on_diag = torch.diagonal(cross_corr).add_(-1).pow_(2).sum()
off_diag = self.off_diagonal_ele(cross_corr).pow_(2).sum()
return on_diag + self.lambda_coeff * off_diag
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
L2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwecwssm3wtmxsvs2kudgljb4a2t55bg5xziiwfbr2wlehdvnmj.py
# Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul]
# Source node to ATen node mapping:
# out => mul
# x => div
# Graph fragment:
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %expand), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %div), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_div_mul_0 = async_compile.triton('triton_poi_fused_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_0', 'mutated_arg_names': ['in_ptr0', 'out_ptr2'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp15
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp17, xmask)
tl.store(out_ptr2 + (x3), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, out], Original ATen: [aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_div_mul_0.run(primals_1, primals_2, buf0, buf1, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
class L2Norm(nn.Module):
def __init__(self, n_channels, scale):
super(L2Norm, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
        init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(1).sqrt() + self.eps
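        # note: sum(1) drops the channel axis (no keepdim), so the expand_as
        # below broadcasts norm over the leading dimension; this only lines
        # up when batch and channel sizes coincide, as in the 4x4x4x4 inputs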
x /= norm.expand_as(x)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x
) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_channels': 4, 'scale': 1.0}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x1 = xindex // 16 % 4
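    # x0: offset inside one 16-element H*W plane, x1: broadcast index of the
    # norm; the four loads at offsets 0/16/32/48 re-gather the values reduced
    # by pow(2).sum(1), fusing square-sum, sqrt (+1e-10), divide and the
    # per-channel weight scale into a single pass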
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask, eviction_policy=
'evict_last')
tmp16 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-10
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp17 = tmp16 * tmp15
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp17, xmask)
tl.store(out_ptr2 + x3, tmp15, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_mul_0[grid(256)](primals_1, primals_2, buf0,
buf1, primals_1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, buf0
class L2NormNew(nn.Module):
def __init__(self, n_channels, scale):
super(L2NormNew, self).__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
        init.constant_(self.weight, self.gamma)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| ashwath007/amenity-detection | L2Norm | false | 6,263 | [
"Apache-2.0"
] | 1 | acb885eb4d791acc6e65237445a4fc6830e4d30c | https://github.com/ashwath007/amenity-detection/tree/acb885eb4d791acc6e65237445a4fc6830e4d30c | import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
import torch.nn.init as init
class Model(nn.Module):
def __init__(self, n_channels, scale):
super().__init__()
self.n_channels = n_channels
self.gamma = scale or None
self.eps = 1e-10
self.weight = nn.Parameter(torch.Tensor(self.n_channels))
self.reset_parameters()
def reset_parameters(self):
        init.constant_(self.weight, self.gamma)
def forward(self, x):
norm = x.pow(2).sum(1).sqrt() + self.eps
x /= norm.expand_as(x)
out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x
) * x
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 1.0]
|
SirenModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vp/cvpc55lt3l2owcwndt777vegsrq4gm7oa7jxzrn47qc6xud2rego.py
# Topologically Sorted Source Nodes: [sin], Original ATen: [aten.sin]
# Source node to ATen node mapping:
# sin => sin
# Graph fragment:
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%view_1,), kwargs = {})
triton_poi_fused_sin_0 = async_compile.triton('triton_poi_fused_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.sin(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sin], Original ATen: [aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_sin_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn
class SirenModule(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SirenModule, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features)
init_bounds = math.sqrt(6 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
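        # sqrt(6 / fan_in) is the SIREN-style uniform initialization bound
        # (Sitzmann et al., 2020), keeping sin pre-activations
        # well-distributed through depth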
def forward(self, x):
return torch.sin(self.linear(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.sin(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class SirenModuleNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SirenModuleNew, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features)
init_bounds = math.sqrt(6 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ashwinpn/Computer-Vision | SirenModule | false | 6,264 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import math
import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super().__init__()
self.linear = torch.nn.Linear(in_features, out_features)
init_bounds = math.sqrt(6 / in_features) * weight_multiplier
torch.nn.init.uniform_(self.linear.weight, a=-init_bounds, b=
init_bounds)
def forward(self, x):
return torch.sin(self.linear(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Normalization | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/h3/ch363nqfhhjkzrml3vnkpfci6bn63dfhoqmhs4zrl2eudcko4y4o.py
# Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div]
# Source node to ATen node mapping:
# sub => sub
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %arg2_1), kwargs = {})
triton_poi_fused_div_sub_0 = async_compile.triton('triton_poi_fused_div_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 3
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 1), (1, 1, 1))
assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(arg2_1, (3, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, truediv], Original ATen: [aten.sub, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((3, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((3, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Normalization(nn.Module):
def __init__(self, mean=torch.zeros(3), std=torch.ones(3)):
super(Normalization, self).__init__()
self.mean = nn.Parameter(mean.view(-1, 1, 1), requires_grad=False)
self.std = nn.Parameter(std.view(-1, 1, 1), requires_grad=False)
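        # mean/std are stored as (C, 1, 1) non-trainable parameters so they
        # broadcast over (N, C, H, W) images in forward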
def forward(self, img):
return (img - self.mean) / self.std
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 3
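    # xnumel = 192 == 4 * 3 * 16 (N * C * H * W); x1 recovers the channel,
    # broadcasting the per-channel mean and std over batch and space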
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 / tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (3, 1, 1), (1, 1, 1))
assert_size_stride(arg1_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(arg2_1, (3, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_sub_0[grid(192)](arg1_1, arg0_1, arg2_1, buf0,
192, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class NormalizationNew(nn.Module):
def __init__(self, mean=torch.zeros(3), std=torch.ones(3)):
super(NormalizationNew, self).__init__()
self.mean = nn.Parameter(mean.view(-1, 1, 1), requires_grad=False)
self.std = nn.Parameter(std.view(-1, 1, 1), requires_grad=False)
def forward(self, input_0):
arg0_1 = self.mean
arg2_1 = self.std
arg1_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| asjir/adain | Normalization | false | 6,265 | [
"MIT"
] | 1 | 1d0f70f161e485ce61ea57ab619d66e8f4ccadde | https://github.com/asjir/adain/tree/1d0f70f161e485ce61ea57ab619d66e8f4ccadde | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, mean=torch.zeros(3), std=torch.ones(3)):
super().__init__()
self.mean = nn.Parameter(mean.view(-1, 1, 1), requires_grad=False)
self.std = nn.Parameter(std.view(-1, 1, 1), requires_grad=False)
def forward(self, img):
return (img - self.mean) / self.std
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return []
|
BCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/a6/ca625ch2ma4nxamrbh6aiqjarhys2ayutwxi7mqpfwzyf5cgoavx.py
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.binary_cross_entropy, aten.mul]
# Source node to ATen node mapping:
# loss => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1
# mul => mul_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%arg1_1,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %maximum_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_binary_cross_entropy_mul_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp18, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.binary_cross_entropy, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_mul_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class BCELoss(nn.Module):
"""Binary Cross Entropy loss."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.binary_cross_entropy
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_labels: K
Args:
output (torch.Tensor[N, K]): Output classification.
target (torch.Tensor[N, K]): Target classification.
target_weight (torch.Tensor[N, K] or torch.Tensor[N]):
Weights across different labels.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output, target, reduction='none')
if target_weight.dim() == 1:
target_weight = target_weight[:, None]
loss = (loss * target_weight).mean()
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_mul_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
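    # elementwise BCE, -(t * log(p) + (1 - t) * log(1 - p)), computed below
    # as (t - 1) * log1p(-p) - t * log(p); both log terms are clamped at
    # -100.0, matching ATen's binary_cross_entropy, then mean-reduced over
    # all 256 elements and scaled by loss_weight (1.0)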
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = tmp17 * tmp1
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp18, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_mul_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCELossNew(nn.Module):
"""Binary Cross Entropy loss."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.binary_cross_entropy
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | BCELoss | false | 6,266 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Binary Cross Entropy loss."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.binary_cross_entropy
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_labels: K
Args:
output (torch.Tensor[N, K]): Output classification.
target (torch.Tensor[N, K]): Target classification.
target_weight (torch.Tensor[N, K] or torch.Tensor[N]):
Weights across different labels.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output, target, reduction='none')
if target_weight.dim() == 1:
target_weight = target_weight[:, None]
loss = (loss * target_weight).mean()
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
KLDLossWithStandardGaussian | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5v/c5vtwldilipwi3r3wx5rsxypbvfmsav7s7xybl5bf72d3sckrknw.py
# Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul => mul
# pow_1 => pow_1
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %exp), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {})
triton_per_fused_add_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_exp_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 - tmp4
tmp6 = tl_math.exp(tmp0)
tmp7 = tmp5 - tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, mul], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_exp_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class KLDLossWithStandardGaussian(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
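# Hedged sanity check (an illustrative addition, not repository code): the
# closed-form expression above is the analytic KL(N(mu, sigma^2) || N(0, 1))
# summed over all elements, so it can be cross-checked against
# torch.distributions. The helper name below is an assumption.
def _check_kld_closed_form():
    import torch.distributions as D
    mu, logvar = torch.randn(8), torch.randn(8)
    loss = KLDLossWithStandardGaussian()(mu, logvar)
    q = D.Normal(mu, (0.5 * logvar).exp())
    p = D.Normal(torch.zeros(8), torch.ones(8))
    assert torch.allclose(loss, D.kl_divergence(q, p).sum(), atol=1e-05)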
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 - tmp4
tmp6 = tl_math.exp(tmp0)
tmp7 = tmp5 - tmp6
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = -0.5
tmp12 = tmp10 * tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_exp_mul_pow_sub_sum_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KLDLossWithStandardGaussianNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
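# Minimal usage sketch (an assumption, not repository code): the
# assert_size_stride guards in `call` specialize this compiled wrapper to
# contiguous (4, 4, 4, 4) float32 CUDA tensors; the result is a 0-dim
# tensor holding the summed KLD.
def _run_compiled_kld():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    return KLDLossWithStandardGaussianNew()(a, b)  # 0-dim float32 tensor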
| atmacvit/meronymnet | KLDLossWithStandardGaussian | false | 6,267 | [
"MIT"
] | 1 | 47e1a7caadc0f770439bb26a93b885f790f62804 | https://github.com/atmacvit/meronymnet/tree/47e1a7caadc0f770439bb26a93b885f790f62804 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
KLDLossWithStandardGaussianNoReduction | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/oz/cozelbvxfzuzuwwn7bvetduh42pghdw23gjrjlkcdpsidbb244xb.py
# Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, KLD], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul]
# Source node to ATen node mapping:
# KLD => mul
# add => add
# exp => exp
# pow_1 => pow_1
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %pow_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %exp), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [-1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -0.5), kwargs = {})
triton_poi_fused_add_exp_mul_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_add_exp_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 - tmp4
tmp6 = tl_math.exp(tmp0)
tmp7 = tmp5 - tmp6
tmp9 = tmp8 + tmp1
tmp11 = tmp10 * tmp10
tmp12 = tmp9 - tmp11
tmp13 = tl_math.exp(tmp8)
tmp14 = tmp12 - tmp13
tmp15 = tmp7 + tmp14
tmp17 = tmp16 + tmp1
tmp19 = tmp18 * tmp18
tmp20 = tmp17 - tmp19
tmp21 = tl_math.exp(tmp16)
tmp22 = tmp20 - tmp21
tmp23 = tmp15 + tmp22
tmp25 = tmp24 + tmp1
tmp27 = tmp26 * tmp26
tmp28 = tmp25 - tmp27
tmp29 = tl_math.exp(tmp24)
tmp30 = tmp28 - tmp29
tmp31 = tmp23 + tmp30
tmp32 = -0.5
tmp33 = tmp31 * tmp32
tl.store(in_out_ptr0 + (x0), tmp33, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, pow_1, sub, exp, sub_1, sum_1, KLD], Original ATen: [aten.add, aten.pow, aten.sub, aten.exp, aten.sum, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_exp_mul_pow_sub_sum_0.run(buf1, arg0_1, arg1_1, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class KLDLossWithStandardGaussianNoReduction(nn.Module):
def forward(self, mu, logvar):
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1)
return KLD
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
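# Shape sketch (an illustrative addition, not repository code): only the
# last dimension is reduced, so callers receive a per-element KLD they can
# weight or mask before any further reduction.
def _kld_no_reduction_shape():
    mu = torch.rand(4, 4, 4, 4)
    logvar = torch.rand(4, 4, 4, 4)
    out = KLDLossWithStandardGaussianNoReduction()(mu, logvar)
    assert out.shape == (4, 4, 4)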
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_exp_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 - tmp4
tmp6 = tl_math.exp(tmp0)
tmp7 = tmp5 - tmp6
tmp9 = tmp8 + tmp1
tmp11 = tmp10 * tmp10
tmp12 = tmp9 - tmp11
tmp13 = tl_math.exp(tmp8)
tmp14 = tmp12 - tmp13
tmp15 = tmp7 + tmp14
tmp17 = tmp16 + tmp1
tmp19 = tmp18 * tmp18
tmp20 = tmp17 - tmp19
tmp21 = tl_math.exp(tmp16)
tmp22 = tmp20 - tmp21
tmp23 = tmp15 + tmp22
tmp25 = tmp24 + tmp1
tmp27 = tmp26 * tmp26
tmp28 = tmp25 - tmp27
tmp29 = tl_math.exp(tmp24)
tmp30 = tmp28 - tmp29
tmp31 = tmp23 + tmp30
tmp32 = -0.5
tmp33 = tmp31 * tmp32
tl.store(in_out_ptr0 + x0, tmp33, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_exp_mul_pow_sub_sum_0[grid(64)](buf1, arg0_1,
arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class KLDLossWithStandardGaussianNoReductionNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atmacvit/meronymnet | KLDLossWithStandardGaussianNoReduction | false | 6,268 | [
"MIT"
] | 1 | 47e1a7caadc0f770439bb26a93b885f790f62804 | https://github.com/atmacvit/meronymnet/tree/47e1a7caadc0f770439bb26a93b885f790f62804 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def forward(self, mu, logvar):
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=-1)
return KLD
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7p/c7po76azmxaenhqdhxmjkwmmyrh5jpr4p5zydgfurvfy3q5qm6gp.py
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.mse_loss, aten.mul]
# Source node to ATen node mapping:
# loss => mean, pow_1, sub
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_mse_loss_mul_0 = async_compile.triton('triton_per_fused_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.mse_loss, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_mul_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MSELoss(nn.Module):
"""MSE loss for coordinate regression."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.mse_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
Args:
output (torch.Tensor[N, K, 2]): Output regression.
target (torch.Tensor[N, K, 2]): Target regression.
target_weight (torch.Tensor[N, K, 2]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
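# Hedged usage sketch (the weight values below are illustrative
# assumptions): with use_target_weight=True both prediction and target are
# masked by the same per-joint weights, so zero-weight keypoints contribute
# no error.
def _weighted_mse_example():
    criterion = MSELoss(use_target_weight=True)
    output = torch.rand(2, 17, 2)
    target = torch.rand(2, 17, 2)
    weight = torch.ones(2, 17, 2)
    weight[:, 0] = 0.0  # ignore the first keypoint entirely
    return criterion(output, target, target_weight=weight)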
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_mul_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MSELossNew(nn.Module):
"""MSE loss for coordinate regression."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.mse_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | MSELoss | false | 6,269 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""MSE loss for coordinate regression."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.mse_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
Args:
output (torch.Tensor[N, K, 2]): Output regression.
target (torch.Tensor[N, K, 2]): Target regression.
target_weight (torch.Tensor[N, K, 2]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SpatialEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/j4/cj42y3ycik4bz72xcuz6dg2n7ynzf3mjr6cbopfd37chfwo2atfp.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mul, %mul_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl_math.sin(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 8, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + ((-4) + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.cos(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_3, buf0, buf1, 512, grid=grid(512), stream=stream0)
return (buf1, primals_3, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class SpatialEmbedding(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SpatialEmbedding, self).__init__()
self.b = torch.zeros((in_features, out_features))
self.b.normal_(0, weight_multiplier)
self.b = torch.nn.Parameter(2.0 ** self.b - 1)
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def forward(self, x):
x = torch.matmul(x, self.b)
        return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
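# Shape sketch (an illustrative addition, not repository code): the
# embedding projects the last dimension from in_features to out_features
# and concatenates scaled sin/cos features, so the output width is
# 2 * out_features, matching output_size().
def _spatial_embedding_shapes():
    emb = SpatialEmbedding(in_features=3, out_features=16)
    feats = emb(torch.rand(10, 3))
    assert feats.shape == (10, 32) and emb.output_size() == 32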
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0)
    tmp6 = tl.load(in_ptr1 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl_math.sin(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
    tmp14 = tl.load(in_ptr0 + (-4 + x0), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
    tmp15 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.cos(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, buf0, buf1, 512,
XBLOCK=128, num_warps=4, num_stages=1)
return buf1, primals_3, buf0, reinterpret_tensor(primals_2, (4, 64), (1,
4), 0)
class SpatialEmbeddingNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SpatialEmbeddingNew, self).__init__()
self.b = torch.zeros((in_features, out_features))
self.b.normal_(0, weight_multiplier)
self.b = torch.nn.Parameter(2.0 ** self.b - 1)
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def output_size(self):
return 2 * self.osize
def forward(self, input_0):
primals_1 = self.b
primals_3 = self.a
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
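# Usage sketch (an assumption, not repository code): the guards in `call`
# specialize this compiled wrapper to a contiguous (4, 4, 4, 4) float32
# input and to the (4, 4) / (4,) parameters built in __init__, producing a
# (4, 4, 4, 8) embedding.
def _run_compiled_embedding():
    emb = SpatialEmbeddingNew(4, 4).cuda()
    return emb(torch.rand(4, 4, 4, 4, device='cuda'))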
| ashwinpn/Computer-Vision | SpatialEmbedding | false | 6,270 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super().__init__()
self.b = torch.zeros((in_features, out_features))
self.b.normal_(0, weight_multiplier)
self.b = torch.nn.Parameter(2.0 ** self.b - 1)
self.osize = out_features
self.a = torch.nn.Parameter(torch.ones(out_features))
def forward(self, x):
x = torch.matmul(x, self.b)
        return torch.cat([self.a * torch.sin(x), self.a * torch.cos(x)], dim=-1)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
KLDLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wv/cwvgp3zreybkdyzy4gdnxr4opv4vony3i3dnofn3ak5ikojofcxm.py
# Topologically Sorted Source Nodes: [mul_1, sigma2, mul, sigma1, truediv, add, log, exp_2, sub, pow_1, add_1, exp_3, mul_2, add_2, truediv_1, add_3, kld, sum_1, truediv_2], Original ATen: [aten.mul, aten.exp, aten.div, aten.add, aten.log, aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# exp_2 => exp_2
# exp_3 => exp_3
# kld => sub_1
# log => log
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# sigma1 => exp
# sigma2 => exp_1
# sub => sub
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# truediv_2 => div_2
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, 0.5), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_2, %pow_1), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg2_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1e-08), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %div_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_1,), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp12 = tl.load(in_ptr2 + (r0), None)
tmp13 = tl.load(in_ptr3 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp4 * tmp1
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp3 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tl_math.exp(tmp4)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp11 + tmp15
tmp17 = tl_math.exp(tmp0)
tmp18 = 2.0
tmp19 = tmp17 * tmp18
tmp20 = tmp19 + tmp8
tmp21 = tmp16 / tmp20
tmp22 = tmp10 + tmp21
tmp23 = tmp22 - tmp1
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 0.25
tmp28 = tmp26 * tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul_1, sigma2, mul, sigma1, truediv, add, log, exp_2, sub, pow_1, add_1, exp_3, mul_2, add_2, truediv_1, add_3, kld, sum_1, truediv_2], Original ATen: [aten.mul, aten.exp, aten.div, aten.add, aten.log, aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0.run(buf1, arg2_1, arg1_1, arg0_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class KLDLoss(nn.Module):
def forward(self, mu1, logvar1, mu2, logvar2):
batch_size = mu1.shape[0]
sigma1 = logvar1.mul(0.5).exp()
sigma2 = logvar2.mul(0.5).exp()
kld = torch.log(sigma2 / sigma1 + 1e-08) + (torch.exp(logvar1) + (
mu1 - mu2) ** 2) / (2 * torch.exp(logvar2) + 1e-08) - 1 / 2
return kld.sum() / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
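# Hedged cross-check (an illustrative addition, not repository code): apart
# from the 1e-08 stabilizers, the expression above is the analytic
# KL(N(mu1, sigma1^2) || N(mu2, sigma2^2)), summed and divided by the batch
# size, which torch.distributions can verify. The helper is an assumption.
def _check_pairwise_kld():
    import torch.distributions as D
    mu1, lv1 = torch.randn(4, 8), torch.randn(4, 8)
    mu2, lv2 = torch.randn(4, 8), torch.randn(4, 8)
    loss = KLDLoss()(mu1, lv1, mu2, lv2)
    ref = D.kl_divergence(D.Normal(mu1, (0.5 * lv1).exp()),
                          D.Normal(mu2, (0.5 * lv2).exp())).sum() / 4
    assert torch.allclose(loss, ref, atol=0.0001)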
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp12 = tl.load(in_ptr2 + r0, None)
tmp13 = tl.load(in_ptr3 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp4 * tmp1
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp3 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tl_math.exp(tmp4)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp11 + tmp15
tmp17 = tl_math.exp(tmp0)
tmp18 = 2.0
tmp19 = tmp17 * tmp18
tmp20 = tmp19 + tmp8
tmp21 = tmp16 / tmp20
tmp22 = tmp10 + tmp21
tmp23 = tmp22 - tmp1
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 0.25
tmp28 = tmp26 * tmp27
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_exp_log_mul_pow_sub_sum_0[grid(1)](buf1,
arg2_1, arg1_1, arg0_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
class KLDLossNew(nn.Module):
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| atmacvit/meronymnet | KLDLoss | false | 6,271 | [
"MIT"
] | 1 | 47e1a7caadc0f770439bb26a93b885f790f62804 | https://github.com/atmacvit/meronymnet/tree/47e1a7caadc0f770439bb26a93b885f790f62804 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def forward(self, mu1, logvar1, mu2, logvar2):
batch_size = mu1.shape[0]
sigma1 = logvar1.mul(0.5).exp()
sigma2 = logvar2.mul(0.5).exp()
kld = torch.log(sigma2 / sigma1 + 1e-08) + (torch.exp(logvar1) + (
mu1 - mu2) ** 2) / (2 * torch.exp(logvar2) + 1e-08) - 1 / 2
return kld.sum() / batch_size
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CuboidPoseHead | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wy/cwye4gfk4ug6fholrtujqkcyckps5nmpvc74fhtecqjpl343zhyh.py
# Topologically Sorted Source Nodes: [x_1, x_2, human_poses], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# human_poses => sum_2
# x_1 => div, exp, sum_1
# x_2 => mul_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %unsqueeze), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {})
triton_poi_fused__softmax_mul_sum_0 = async_compile.triton('triton_poi_fused__softmax_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tmp2 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp6 / tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x5), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2, human_poses], Original ATen: [aten._softmax, aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_mul_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CuboidPoseHead(nn.Module):
def __init__(self, beta):
"""Get results from the 3D human pose heatmap. Instead of obtaining
maximums on the heatmap, this module regresses the coordinates of
keypoints via integral pose regression. Refer to `paper.
<https://arxiv.org/abs/2004.06239>` for more details.
Args:
beta: Constant to adjust the magnification of soft-maxed heatmap.
"""
super(CuboidPoseHead, self).__init__()
self.beta = beta
self.loss = nn.L1Loss()
def forward(self, heatmap_volumes, grid_coordinates):
"""
Args:
heatmap_volumes (torch.Tensor(NxKxLxWxH)):
3D human pose heatmaps predicted by the network.
grid_coordinates (torch.Tensor(Nx(LxWxH)x3)):
Coordinates of the grids in the heatmap volumes.
Returns:
human_poses (torch.Tensor(NxKx3)): Coordinates of human poses.
"""
batch_size = heatmap_volumes.size(0)
channel = heatmap_volumes.size(1)
x = heatmap_volumes.reshape(batch_size, channel, -1, 1)
x = F.softmax(self.beta * x, dim=2)
grid_coordinates = grid_coordinates.unsqueeze(1)
x = torch.mul(x, grid_coordinates)
human_poses = torch.sum(x, dim=2)
return human_poses
def get_loss(self, preds, targets, weights):
return dict(loss_pose=self.loss(preds * weights, targets * weights))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'beta': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex % 16
x5 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tmp2 - tmp2
tmp4 = 4.0
tmp5 = tmp3 * tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp6 / tmp6
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x5, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_mul_sum_0[grid(64)](arg0_1, arg1_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CuboidPoseHeadNew(nn.Module):
def __init__(self, beta):
"""Get results from the 3D human pose heatmap. Instead of obtaining
maximums on the heatmap, this module regresses the coordinates of
        keypoints via integral pose regression. Refer to the paper
        <https://arxiv.org/abs/2004.06239> for more details.
        Args:
            beta: Constant to adjust the magnification of the soft-maxed heatmap.
"""
super(CuboidPoseHeadNew, self).__init__()
self.beta = beta
self.loss = nn.L1Loss()
def get_loss(self, preds, targets, weights):
return dict(loss_pose=self.loss(preds * weights, targets * weights))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | CuboidPoseHead | false | 6,272 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, beta):
"""Get results from the 3D human pose heatmap. Instead of obtaining
maximums on the heatmap, this module regresses the coordinates of
        keypoints via integral pose regression. Refer to the paper
        <https://arxiv.org/abs/2004.06239> for more details.
        Args:
            beta: Constant to adjust the magnification of the soft-maxed heatmap.
"""
super().__init__()
self.beta = beta
self.loss = nn.L1Loss()
def forward(self, heatmap_volumes, grid_coordinates):
"""
Args:
heatmap_volumes (torch.Tensor(NxKxLxWxH)):
3D human pose heatmaps predicted by the network.
grid_coordinates (torch.Tensor(Nx(LxWxH)x3)):
Coordinates of the grids in the heatmap volumes.
Returns:
human_poses (torch.Tensor(NxKx3)): Coordinates of human poses.
"""
batch_size = heatmap_volumes.size(0)
channel = heatmap_volumes.size(1)
x = heatmap_volumes.reshape(batch_size, channel, -1, 1)
x = F.softmax(self.beta * x, dim=2)
grid_coordinates = grid_coordinates.unsqueeze(1)
x = torch.mul(x, grid_coordinates)
human_poses = torch.sum(x, dim=2)
return human_poses
def get_loss(self, preds, targets, weights):
return dict(loss_pose=self.loss(preds * weights, targets * weights))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
KLDLossNoReduction | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pq/cpqzd4s4lvyai2i4fbwdxvcbk5blb6mfcwopmeposv2slf62muhb.py
# Topologically Sorted Source Nodes: [mul_1, sigma2, mul, sigma1, truediv, add, log, exp_2, sub, pow_1, add_1, exp_3, mul_2, add_2, truediv_1, add_3, kld], Original ATen: [aten.mul, aten.exp, aten.div, aten.add, aten.log, aten.sub, aten.pow]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# exp_2 => exp_2
# exp_3 => exp_3
# kld => sub_1
# log => log
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# sigma1 => exp
# sigma2 => exp_1
# sub => sub
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.5), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %exp), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1e-08), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp_2, %pow_1), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 1e-08), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, %div_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, 0.5), kwargs = {})
triton_poi_fused_add_div_exp_log_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_add_div_exp_log_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_log_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_log_mul_pow_sub_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (x0), xmask)
tmp12 = tl.load(in_ptr2 + (x0), xmask)
tmp13 = tl.load(in_ptr3 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp4 * tmp1
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp3 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tl_math.exp(tmp4)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp11 + tmp15
tmp17 = tl_math.exp(tmp0)
tmp18 = 2.0
tmp19 = tmp17 * tmp18
tmp20 = tmp19 + tmp8
tmp21 = tmp16 / tmp20
tmp22 = tmp10 + tmp21
tmp23 = tmp22 - tmp1
tl.store(out_ptr0 + (x0), tmp23, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, sigma2, mul, sigma1, truediv, add, log, exp_2, sub, pow_1, add_1, exp_3, mul_2, add_2, truediv_1, add_3, kld], Original ATen: [aten.mul, aten.exp, aten.div, aten.add, aten.log, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_exp_log_mul_pow_sub_0.run(arg1_1, arg0_1, arg2_1, arg3_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class KLDLossNoReduction(nn.Module):
def forward(self, mu1, logvar1, mu2, logvar2):
sigma1 = logvar1.mul(0.5).exp()
sigma2 = logvar2.mul(0.5).exp()
kld = torch.log(sigma2 / sigma1 + 1e-08) + (torch.exp(logvar1) + (
mu1 - mu2) ** 2) / (2 * torch.exp(logvar2) + 1e-08) - 1 / 2
return kld
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_exp_log_mul_pow_sub_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + x0, xmask)
tmp12 = tl.load(in_ptr2 + x0, xmask)
tmp13 = tl.load(in_ptr3 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp5 = tmp4 * tmp1
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp3 / tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tl_math.exp(tmp4)
tmp14 = tmp12 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tmp11 + tmp15
tmp17 = tl_math.exp(tmp0)
tmp18 = 2.0
tmp19 = tmp17 * tmp18
tmp20 = tmp19 + tmp8
tmp21 = tmp16 / tmp20
tmp22 = tmp10 + tmp21
tmp23 = tmp22 - tmp1
tl.store(out_ptr0 + x0, tmp23, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_exp_log_mul_pow_sub_0[grid(256)](arg1_1,
arg0_1, arg2_1, arg3_1, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class KLDLossNoReductionNew(nn.Module):
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| atmacvit/meronymnet | KLDLossNoReduction | false | 6,273 | [
"MIT"
] | 1 | 47e1a7caadc0f770439bb26a93b885f790f62804 | https://github.com/atmacvit/meronymnet/tree/47e1a7caadc0f770439bb26a93b885f790f62804 | import torch
import torch.nn as nn
import torch.utils.data
class Model(nn.Module):
def forward(self, mu1, logvar1, mu2, logvar2):
sigma1 = logvar1.mul(0.5).exp()
sigma2 = logvar2.mul(0.5).exp()
kld = torch.log(sigma2 / sigma1 + 1e-08) + (torch.exp(logvar1) + (
mu1 - mu2) ** 2) / (2 * torch.exp(logvar2) + 1e-08) - 1 / 2
return kld
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
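# Hedged sanity check (added for illustration): the expression above is the
# closed-form KL divergence between diagonal Gaussians, so it should vanish
# (up to the 1e-08 stabilizers) when both distributions coincide. The helper
# name _kld_demo is an assumption.
def _kld_demo():
    mu = torch.randn(4, 4)
    logvar = torch.randn(4, 4)
    kld = Model()(mu, logvar, mu.clone(), logvar.clone())
    print(bool(kld.abs().max() < 1e-04))  # True: KL(p || p) == 0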
|
SimpleSpatialEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gi/cgi3fss626cxoqglanqxywnf4pyojcrst3yxyhm5wcmiug6pi5ni.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sin, %cos], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tmp10 = tl.full([1], 8, tl.int64)
tmp11 = tmp0 < tmp10
tmp12 = tl.load(in_ptr0 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, 512, grid=grid(512), stream=stream0)
return (buf1, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn
class SimpleSpatialEmbedding(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SimpleSpatialEmbedding, self).__init__()
self.b = torch.zeros((in_features, out_features))
self.b.normal_(0, weight_multiplier)
self.b = torch.nn.Parameter(2.0 ** self.b - 1)
self.osize = out_features
def forward(self, x):
x = torch.matmul(x, self.b)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
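# Hedged usage sketch (added for illustration): the module projects inputs
# through a fixed random matrix and concatenates sin/cos features, doubling
# the last dimension from out_features to output_size(). Names below are
# illustrative.
def _embedding_demo():
    emb = SimpleSpatialEmbedding(in_features=3, out_features=16)
    feats = emb(torch.rand(10, 3))
    print(feats.shape, emb.output_size())  # torch.Size([10, 32]) 32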
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl_math.sin(tmp5)
tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype)
tmp8 = tl.where(tmp4, tmp6, tmp7)
tmp9 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp12 = tl.load(in_ptr0 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp13 = tl_math.cos(tmp12)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp9, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp8, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, buf0, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
class SimpleSpatialEmbeddingNew(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super(SimpleSpatialEmbeddingNew, self).__init__()
self.b = torch.zeros((in_features, out_features))
self.b.normal_(0, weight_multiplier)
self.b = torch.nn.Parameter(2.0 ** self.b - 1)
self.osize = out_features
def output_size(self):
return 2 * self.osize
def forward(self, input_0):
primals_1 = self.b
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| ashwinpn/Computer-Vision | SimpleSpatialEmbedding | false | 6,274 | [
"MIT"
] | 1 | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | https://github.com/ashwinpn/Computer-Vision/tree/9dc3abfe416385171b76e2bad6872e10f36a12b4 | import torch
import torch.nn
class Model(torch.nn.Module):
def __init__(self, in_features, out_features, weight_multiplier=1.0):
super().__init__()
self.b = torch.zeros((in_features, out_features))
self.b.normal_(0, weight_multiplier)
self.b = torch.nn.Parameter(2.0 ** self.b - 1)
self.osize = out_features
def forward(self, x):
x = torch.matmul(x, self.b)
return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
def output_size(self):
return 2 * self.osize
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
MPJPELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zu/czujx3x7ghsdpfovcj7w56ztapmlm4ti2434djcb5yn2khft5ydg.py
# Topologically Sorted Source Nodes: [sub, norm, loss, mul], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => mean
# mul => mul
# norm => pow_1, pow_2, sum_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_linalg_vector_norm_mean_mul_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
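# Unrolled sum of squared differences over the last dimension (D = 4); the
# sqrt below gives the per-point Euclidean norm before the batch mean.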
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = 64.0
tmp24 = tmp22 / tmp23
tmp25 = 1.0
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, norm, loss, mul], Original ATen: [aten.sub, aten.linalg_vector_norm, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_mul_sub_0.run(buf1, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MPJPELoss(nn.Module):
"""MPJPE (Mean Per Joint Position Error) loss.
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N,K,D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = torch.mean(torch.norm((output - target) * target_weight,
dim=-1))
else:
loss = torch.mean(torch.norm(output - target, dim=-1))
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
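# Hedged sanity check (added for illustration): with a single joint displaced
# by a known offset, MPJPE equals the Euclidean length of that offset. The
# helper name is an assumption.
def _mpjpe_demo():
    loss = MPJPELoss()
    target = torch.zeros(1, 1, 3)
    output = torch.tensor([[[3.0, 4.0, 0.0]]])  # 3-4-5 offset, length 5
    print(loss(output, target))                 # tensor(5.)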
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_linalg_vector_norm_mean_mul_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = libdevice.sqrt(tmp18)
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp22 = tl.sum(tmp20, 1)[:, None]
tmp23 = 64.0
tmp24 = tmp22 / tmp23
tmp25 = 1.0
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_mean_mul_sub_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MPJPELossNew(nn.Module):
"""MPJPE (Mean Per Joint Position Error) loss.
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | MPJPELoss | false | 6,275 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
class Model(nn.Module):
"""MPJPE (Mean Per Joint Position Error) loss.
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N,K,D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = torch.mean(torch.norm((output - target) * target_weight,
dim=-1))
else:
loss = torch.mean(torch.norm(output - target, dim=-1))
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SelfGate | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3p/c3p4nipaqzxzpjjeoqn62b5xjhgbstvjxzoaitmarjy5ehvsncbu.py
# Topologically Sorted Source Nodes: [mish, x_2], Original ATen: [aten.mish, aten.mul]
# Source node to ATen node mapping:
# mish => exp, gt, log1p, mul, tanh, where
# x_2 => mul_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%getitem_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%getitem_1, 20), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %getitem_1, %log1p), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%where,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, %tanh), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %mul), kwargs = {})
triton_poi_fused_mish_mul_0 = async_compile.triton('triton_poi_fused_mish_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mish_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mish_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
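# Mish gate: softplus with the standard threshold (for x > 20, log1p(exp(x))
# is replaced by x to avoid overflow), then tanh, then multiply.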
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mish, x_2], Original ATen: [aten.mish, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mish_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 0), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 4), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
class SelfGate(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x = self.proj(x)
x, gate = x.chunk(2, dim=-1)
x = x * F.mish(gate)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_out': 4}]
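# Hedged shape check (added for illustration): the projection doubles the
# feature dimension and the chunk/Mish gate halves it again, so the output
# feature size equals dim_out. Names below are illustrative.
def _selfgate_demo():
    gate = SelfGate(dim_in=8, dim_out=8)
    y = gate(torch.rand(2, 5, 8))
    print(y.shape)  # torch.Size([2, 5, 8])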
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mish_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = libdevice.tanh(tmp6)
tmp8 = tmp1 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 8), (8, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 8), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mish_mul_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4, 4), (128, 32, 8, 1), 4)
class SelfGateNew(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = self.proj.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| awesome-archive/AI-Writer | SelfGate | false | 6,276 | [
"BSD-3-Clause"
] | 1 | abdcd5582f81fca2f677a020360654865bf82065 | https://github.com/awesome-archive/AI-Writer/tree/abdcd5582f81fca2f677a020360654865bf82065 | import torch
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x = self.proj(x)
x, gate = x.chunk(2, dim=-1)
x = x * F.mish(gate)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zf/czf4b75gr6eqse3otrscdjeulescoi5ktlpvmsmjlmhwzv5w7coq.py
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
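# 0.7978845608028654 == sqrt(2 / pi), the constant of the tanh GELU approximation.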
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, mul_3], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
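# Hedged comparison (added for illustration): the forward above is the tanh
# approximation of GELU, so it should agree closely with PyTorch's built-in
# F.gelu(..., approximate='tanh'), available in recent PyTorch releases.
def _gelu_demo():
    x = torch.randn(1000)
    print(torch.allclose(GELU()(x), F.gelu(x, approximate='tanh'), atol=1e-05))
    # True (up to float32 rounding)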
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def __init__(self):
super(GELUNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| au55555/classification-pytorch | GELU | false | 6,277 | [
"MIT"
] | 1 | 1937599ae6e688ed7af7470f69964fb6f97241c4 | https://github.com/au55555/classification-pytorch/tree/1937599ae6e688ed7af7470f69964fb6f97241c4 | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Selection | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/os/cosmcerpidzwa6sjwrma6sckbev7edvqrnpoottn3y2xknmtustb.py
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.std]
# Source node to ATen node mapping:
# std => sqrt, var
# Graph fragment:
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_2, [0]), kwargs = {correction: 1.0})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
triton_poi_fused_std_0 = async_compile.triton('triton_poi_fused_std_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_std_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_std_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
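# Sample std over the batch of 4 at each latent position: mean, unrolled sum
# of squared deviations, Bessel correction (divide by N - 1 = 3), then sqrt,
# matching aten.var with correction=1 from the graph fragment above.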
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = 3.0
tmp21 = tmp19 / tmp20
tmp22 = libdevice.sqrt(tmp21)
tl.store(out_ptr0 + (x0), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dk/cdkg7zys5y3gqtiwl2zlfbqbq5tru7ravxon55tbwigoo34fkspd.py
# Topologically Sorted Source Nodes: [exp, mul, mul_1, sample], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# exp => exp
# mul => mul
# mul_1 => mul_1
# sample => add
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%expand,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_1, %exp), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %mul_1), kwargs = {})
triton_poi_fused_add_exp_mul_1 = async_compile.triton('triton_poi_fused_add_exp_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x3), xmask)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 * tmp3
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [std], Original ATen: [aten.std]
stream0 = get_raw_stream(0)
triton_poi_fused_std_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [exp, mul, mul_1, sample], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_1.run(primals_2, buf0, primals_1, primals_3, buf1, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Selection(nn.Module):
"""
Selection neurons to sample from a latent representation for a decoder agent.
An abstract representation :math:`l_i` is disturbed by a value :math:`r_i`, sampled from a standard
normal distribution and scaled by the selection neuron :math:`s_i`.
.. math::
    n_i \\sim l_i + \\sigma_{l_i} \\times \\exp(s_i) \\times r_i
where :math:`\\sigma_{l_i}` is the standard deviation over the batch.
If the selection neuron has a low (i.e. negative) value, the latent variable is passed to the agent.
If the selection neuron has a high value (i.e. close to zero), the latent variable is rendered useless to the agent.
Args:
num_selectors (int): Number of selection neurons, i.e. latent variables.
**kwargs:
init_selectors (float): Initial value for selection neurons. Default: -10.
"""
def __init__(self, num_selectors, init_selectors=-10.0):
super(Selection, self).__init__()
select = torch.Tensor([init_selectors for _ in range(num_selectors)])
self.selectors = nn.Parameter(select)
def forward(self, x, rand, std_dev=None):
"""
The forward pass for the selection neurons.
Args:
x (torch.Tensor): The input array of shape (batch_size, size_latent).
rand (torch.Tensor): Random samples from a standard normal distribution of shape (batch_size, size_latent).
**kwargs:
std_dev (:class:`torch.Tensor` or :class:`NoneType`): The standard deviation calculated throughout
episodes. Needs to be specified for prediction.
Default: None.
Returns:
sample (torch.Tensor): Sample from a distribution around latent variables.
"""
selectors = self.selectors.expand_as(x)
if std_dev is None:
std = x.std(dim=0).expand_as(x)
else:
std = std_dev
sample = x + std * torch.exp(selectors) * rand
return sample
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_selectors': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
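# Note on the kernel below (added description, not emitted by the generator):
# it computes the per-position batch standard deviation with an unrolled sum
# over the 4 batch elements (batch stride 64), then the Bessel-corrected
# variance (divisor 3.0) and a final sqrt.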
@triton.jit
def triton_poi_fused_std_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = 3.0
tmp21 = tmp19 / tmp20
tmp22 = libdevice.sqrt(tmp21)
tl.store(out_ptr0 + x0, tmp22, xmask)
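# Note on the kernel below (added description, not emitted by the generator):
# elementwise sampling out = x + std * exp(selectors) * rand, with std
# broadcast over the batch axis (x4 = index % 64) and the selectors broadcast
# over the last axis (x0 = index % 4).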
@triton.jit
def triton_poi_fused_add_exp_mul_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x3, xmask)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 * tmp3
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tl.store(out_ptr0 + x3, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_std_0[grid(64)](primals_2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_exp_mul_1[grid(256)](primals_2, buf0,
primals_1, primals_3, buf1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf0
class SelectionNew(nn.Module):
"""
Selection neurons to sample from a latent representation for a decoder agent.
An abstract representation :math:`l_i` is disturbed by a value :math:`r_i`, sampled from a standard
normal distribution and scaled by the selection neuron :math:`s_i`.
.. math::
    n_i \\sim l_i + \\sigma_{l_i} \\times \\exp(s_i) \\times r_i
where :math:`\\sigma_{l_i}` is the standard deviation over the batch.
If the selection neuron has a low (i.e. negative) value, the latent variable is passed to the agent.
If the selection neuron has a high value (i.e. close to zero), the latent variable is rendered useless to the agent.
Args:
num_selectors (int): Number of selection neurons, i.e. latent variables.
**kwargs:
init_selectors (float): Initial value for selection neurons. Default: -10.
"""
def __init__(self, num_selectors, init_selectors=-10.0):
super(SelectionNew, self).__init__()
select = torch.Tensor([init_selectors for _ in range(num_selectors)])
self.selectors = nn.Parameter(select)
def forward(self, input_0, input_1):
primals_1 = self.selectors
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| aswanthkrishna/reinforced_scinet | Selection | false | 6,278 | [
"Apache-2.0"
] | 1 | b520f0c73bb1cdf0d0595f0df32372c96946d963 | https://github.com/aswanthkrishna/reinforced_scinet/tree/b520f0c73bb1cdf0d0595f0df32372c96946d963 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Selection neurons to sample from a latent representation for a decoder agent.
An abstract representation :math:`l_i` is disturbed by a value :math:`r_i` sampled from a normal
standard distribution which is scaled by the selection neuron :math:`s_i`.
..math::
n_i \\sim l_i + \\sigma_{l_i} imes \\exp(s_i) imes r_i
where :math:`\\sigma_{l_i}` is the standard deviation over the batch.
If the selection neuron has a low (i.e. negative) value, the latent variable is passed to the agent.
If the selection neuron has a high value (i.e. close to zero), the latent variable is rendered useless to the agent.
Args:
num_selectors (int): Number of selection neurons, i.e. latent variables.
**kwargs:
init_selectors (float): Initial value for selection neurons. Default: -10.
"""
def __init__(self, num_selectors, init_selectors=-10.0):
super().__init__()
select = torch.Tensor([init_selectors for _ in range(num_selectors)])
self.selectors = nn.Parameter(select)
def forward(self, x, rand, std_dev=None):
"""
The forward pass for the selection neurons.
Args:
x (torch.Tensor): The input array of shape (batch_size, size_latent).
rand (torch.Tensor): Random samples from standard normal distribution of size (batch_size, size_latent).
**kwargs:
std_dev (:class:`torch.Tensor` or :class:`NoneType`): The standard deviation calculated throughout
episodes. Needs to be specified for prediction.
Default: None.
Returns:
sample (torch.Tensor): Sample from a distribution around latent variables.
"""
selectors = self.selectors.expand_as(x)
if std_dev is None:
std = x.std(dim=0).expand_as(x)
else:
std = std_dev
sample = x + std * torch.exp(selectors) * rand
return sample
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
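# Illustrative usage sketch (not part of the original repo; batch size and
# shapes are assumptions):
if __name__ == '__main__':
    model = Model(num_selectors=4)
    x = torch.rand(8, 4)        # (batch_size, size_latent)
    rand = torch.randn(8, 4)    # standard normal noise
    sample = model(x, rand)     # std is taken over the batch dimension
    assert sample.shape == x.shape
    # With selectors initialised at -10, exp(s_i) is ~4.5e-5, so the sample
    # stays almost identical to the latent input until training raises s_i.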
|
SmoothL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5asg4nd3jaxgpbn6ptpyi6axgsaygnv5pqlnptuo35p2hhtygb.py
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.smooth_l1_loss, aten.mul]
# Source node to ATen node mapping:
# loss => abs_1, div, lt, mean, mul, pow_1, sub, sub_1, where
# mul => mul_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_mul_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_mul_smooth_l1_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tmp17 = tmp16 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.smooth_l1_loss, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mul_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SmoothL1Loss(nn.Module):
"""SmoothL1Loss loss.
Args:
use_target_weight (bool): Option to use weighted smooth L1 loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.smooth_l1_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N, K, D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
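# Note on the kernel below (added description, not emitted by the generator):
# a single-program reduction that fuses sub -> abs -> piecewise smooth-L1
# (0.5*d**2 for |d| < 1, |d| - 0.5 otherwise) -> mean over all 256 elements
# -> multiply by loss_weight (1.0).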
@triton.jit
def triton_per_fused_mul_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tmp17 = tmp16 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mul_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SmoothL1LossNew(nn.Module):
"""SmoothL1Loss loss.
Args:
use_target_weight (bool): Option to use weighted smooth L1 loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.smooth_l1_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | SmoothL1Loss | false | 6,279 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""SmoothL1Loss loss.
Args:
use_target_weight (bool): Option to use weighted smooth L1 loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.smooth_l1_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N, K, D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
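# Quick numeric check (illustrative, not from the original repo): with the
# default beta of 1.0, smooth L1 is 0.5*d**2 for |d| < 1 and |d| - 0.5 above.
if __name__ == '__main__':
    loss = Model()(torch.tensor([0.2, 3.0]), torch.tensor([0.0, 0.0]))
    expected = (0.5 * 0.2 ** 2 + (3.0 - 0.5)) / 2   # mean over both elements
    assert torch.isclose(loss, torch.tensor(expected))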
|
L1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/i5/ci5r22vnwphjxav3oibgww4fkm25q4egp3rofzniyjru2u4b563f.py
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => abs_1, mean, sub
# mul => mul
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_abs_mean_mul_sub_0 = async_compile.triton('triton_per_fused_abs_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_mean_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss, mul], Original ATen: [aten.sub, aten.abs, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class L1Loss(nn.Module):
"""L1Loss loss ."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.l1_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
Args:
output (torch.Tensor[N, K, 2]): Output regression.
target (torch.Tensor[N, K, 2]): Target regression.
target_weight (torch.Tensor[N, K, 2]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
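# Note on the kernel below (added description, not emitted by the generator):
# a single-program reduction that fuses sub -> abs -> mean over all 256
# elements -> multiply by loss_weight (1.0).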
@triton.jit
def triton_per_fused_abs_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_mean_mul_sub_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L1LossNew(nn.Module):
"""L1Loss loss ."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.l1_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | L1Loss | false | 6,280 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""L1Loss loss ."""
def __init__(self, use_target_weight=False, loss_weight=1.0):
super().__init__()
self.criterion = F.l1_loss
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
Args:
output (torch.Tensor[N, K, 2]): Output regression.
target (torch.Tensor[N, K, 2]): Target regression.
target_weight (torch.Tensor[N, K, 2]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
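# Illustrative usage (shapes are assumptions): per-joint weighting with the
# result scaled by loss_weight.
if __name__ == '__main__':
    crit = Model(use_target_weight=True, loss_weight=2.0)
    output, target = torch.rand(2, 4, 2), torch.rand(2, 4, 2)   # (N, K, 2)
    weight = torch.ones(2, 4, 2)
    loss = crit(output, target, target_weight=weight)
    # With unit weights this reduces to 2.0 * F.l1_loss(output, target).
    assert torch.isclose(loss, 2.0 * F.l1_loss(output, target))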
|
Postnet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kn/cknvzagd4cmj4kdoivmelcmvo6he4ziakkyh2o7mwewd7eqarhyj.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 80
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (80, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (80, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 80, 4), (320, 4, 1))
del buf0
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 1280, grid=grid(1280), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 80), (320, 1, 4), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((80, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((80, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Postnet(nn.Module):
"""Postnet is a simple linear layer for predicting the target frames given the
RNN context during training. We don't need the Postnet for feature extraction.
"""
def __init__(self, input_size, output_size=80):
super(Postnet, self).__init__()
self.layer = nn.Conv1d(in_channels=input_size, out_channels=
output_size, kernel_size=1, stride=1)
def forward(self, inputs):
inputs = torch.transpose(inputs, 1, 2)
return torch.transpose(self.layer(inputs), 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
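# Note on the kernel below (added description, not emitted by the generator):
# a layout change ahead of the convolution, copying the (batch, time, channel)
# input into the (batch, channel, time) layout that Conv1d expects.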
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
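# Note on the kernel below (added description, not emitted by the generator):
# the bias add fused after the extern Conv1d call, one bias value per output
# channel (x1 = index // 4 % 80), applied in place.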
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1280
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 80
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (80, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (80,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 80, 4), (320, 4, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1280)](buf2, primals_3, 1280,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 80), (320, 1, 4), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class PostnetNew(nn.Module):
"""Postnet is a simple linear layer for predicting the target frames given the
RNN context during training. We don't need the Postnet for feature extraction.
"""
def __init__(self, input_size, output_size=80):
super(PostnetNew, self).__init__()
self.layer = nn.Conv1d(in_channels=input_size, out_channels=
output_size, kernel_size=1, stride=1)
def forward(self, input_0):
primals_2 = self.layer.weight
primals_3 = self.layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| aviasd/Mockingjay-Speech-Representation | Postnet | false | 6,281 | [
"MIT"
] | 1 | c01aef3f98bbb3fd4b0fc1b61e77fb5d02a0e453 | https://github.com/aviasd/Mockingjay-Speech-Representation/tree/c01aef3f98bbb3fd4b0fc1b61e77fb5d02a0e453 | import torch
from torch import nn
class Model(nn.Module):
"""Postnet is a simple linear layer for predicting the target frames given the
RNN context during training. We don't need the Postnet for feature extraction.
"""
def __init__(self, input_size, output_size=80):
super().__init__()
self.layer = nn.Conv1d(in_channels=input_size, out_channels=
output_size, kernel_size=1, stride=1)
def forward(self, inputs):
inputs = torch.transpose(inputs, 1, 2)
return torch.transpose(self.layer(inputs), 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
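# Shape sketch (batch and sequence sizes are assumptions): the 1x1 Conv1d
# maps the feature axis from input_size to 80 while the time axis is untouched.
if __name__ == '__main__':
    net = Model(input_size=4)
    out = net(torch.rand(2, 7, 4))      # (batch, time, input_size)
    assert out.shape == (2, 7, 80)      # (batch, time, output_size)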
|
DivideMax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/aj/cajsvoa7fstjjubnvzw5a3oolwiymmoabi4yem6uzbjjug2hou5t.py
# Topologically Sorted Source Nodes: [maxes, truediv], Original ATen: [aten.amax, aten.div]
# Source node to ATen node mapping:
# maxes => amax
# truediv => div
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [4], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused_amax_div_0 = async_compile.triton('triton_poi_fused_amax_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_amax_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [maxes, truediv], Original ATen: [aten.amax, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_amax_div_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils.data
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim=self.dim, keepdim=True)
return x / maxes
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
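# Note on the kernel below (added description, not emitted by the generator):
# fused amax + div over the last axis (length 4): a three-way maximum tree
# produces the per-slice max, then every element is divided by it.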
@triton.jit
def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_amax_div_0[grid(1024)](arg0_1, buf0, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DivideMaxNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| avihu111/viewpoint_disentanglement | DivideMax | false | 6,282 | [
"MIT"
] | 1 | 07aa4e119426a500fb1e5b5929909cd791982f27 | https://github.com/avihu111/viewpoint_disentanglement/tree/07aa4e119426a500fb1e5b5929909cd791982f27 | import torch
from torch import nn
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim=self.dim, keepdim=True)
return x / maxes
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [4]
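# Quick numeric check (illustrative only): after dividing by the per-slice
# maximum, the max along `dim` is exactly 1 for positive inputs.
if __name__ == '__main__':
    layer = Model(dim=-1)
    y = layer(torch.rand(2, 3, 5) + 0.1)    # keep values strictly positive
    assert torch.allclose(y.amax(dim=-1), torch.ones(2, 3))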
|
Mlp | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zf/czf4b75gr6eqse3otrscdjeulescoi5ktlpvmsmjlmhwzv5w7coq.py
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, x_1], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# tanh => tanh
# wrapped_sqrt => full_default
# x_1 => mul_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.7978845608028654), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.044715), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %add), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul_2,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%tanh, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_mul_pow_sqrt_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_pow_sqrt_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_sqrt_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, wrapped_sqrt, pow_1, mul_1, add, mul_2, tanh, add_1, x_1], Original ATen: [aten.mul, aten.sqrt, aten.pow, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
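# Editorial note (hedged): F.tanh is deprecated in recent PyTorch releases;
# torch.tanh(x) is the idiomatic equivalent and is numerically identical here.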
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_pow_sqrt_tanh_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = tmp0 * tmp0
tmp4 = tmp3 * tmp0
tmp5 = 0.044715
tmp6 = tmp4 * tmp5
tmp7 = tmp0 + tmp6
tmp8 = 0.7978845608028654
tmp9 = tmp8 * tmp7
tmp10 = libdevice.tanh(tmp9)
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp2 * tmp12
tl.store(out_ptr0 + x0, tmp13, xmask)
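def _gelu_tanh_reference(x):
    """Hedged eager-mode sketch (not emitted by Inductor): the fused kernel
    above computes the tanh approximation of GELU,
        0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))),
    where the constant 0.7978845608028654 is sqrt(2/pi)."""
    import math
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2.0 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))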
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_sqrt_tanh_0[grid(256)](buf0, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
class GELU(nn.Module):
def __init__(self):
super(GELU, self).__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class MlpNew(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
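def _example_usage_mlp():
    # Hedged usage sketch (assumes a CUDA device): MlpNew is a drop-in
    # replacement for Mlp whose forward pass runs through the compiled
    # Inductor graph in call().
    model = MlpNew(in_features=4).cuda()
    return model(torch.rand(4, 4, 4, 4, device='cuda'))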
| au55555/classification-pytorch | Mlp | false | 6,283 | [
"MIT"
] | 1 | 1937599ae6e688ed7af7470f69964fb6f97241c4 | https://github.com/au55555/classification-pytorch/tree/1937599ae6e688ed7af7470f69964fb6f97241c4 | import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class GELU(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return 0.5 * x * (1 + F.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 *
torch.pow(x, 3))))
class Model(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None,
act_layer=GELU, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
drop_probs = drop, drop
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.drop1 = nn.Dropout(drop_probs[0])
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop2 = nn.Dropout(drop_probs[1])
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
AllocatingLayer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/by/cbyov6dryaqjwmmslffo5d7emxcqphfyezaudbzliyrapdu3kwho.py
# Topologically Sorted Source Nodes: [VperW_diff, ge_1, Better_j_than_i], Original ATen: [aten.sub, aten.ge, aten.mul]
# Source node to ATen node mapping:
# Better_j_than_i => mul
# VperW_diff => sub
# ge_1 => ge_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sub, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ge_1, 1.0), kwargs = {})
triton_poi_fused_ge_mul_sub_0 = async_compile.triton('triton_poi_fused_ge_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ge_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ge_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = tmp2 >= tmp3
tmp5 = tmp4.to(tl.float32)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + (x4), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7k/c7kgo6bkzv3gvnwofunppraug5x44tpcbfj4os2obs6d7djs6z2q.py
# Topologically Sorted Source Nodes: [sub_1, Satisfying_Constr, mul_1], Original ATen: [aten.rsub, aten.ge, aten.mul]
# Source node to ATen node mapping:
# Satisfying_Constr => ge_2
# mul_1 => mul_1
# sub_1 => sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %squeeze), kwargs = {})
# %ge_2 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%sub_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%ge_2, %arg1_1), kwargs = {})
triton_poi_fused_ge_mul_rsub_1 = async_compile.triton('triton_poi_fused_ge_mul_rsub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ge_mul_rsub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ge_mul_rsub_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp6 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = tmp2 >= tmp3
tmp5 = tmp4.to(tl.float32)
tmp7 = tmp5 * tmp6
tl.store(in_out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [VperW_diff, ge_1, Better_j_than_i], Original ATen: [aten.sub, aten.ge, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_ge_mul_sub_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [VperW_diff, ge_1, Better_j_than_i, matmul], Original ATen: [aten.sub, aten.ge, aten.mul, aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [sub_1, Satisfying_Constr, mul_1], Original ATen: [aten.rsub, aten.ge, aten.mul]
triton_poi_fused_ge_mul_rsub_1.run(buf2, arg1_1, 16, grid=grid(16), stream=stream0)
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn.modules.module import Module
class AllocatingLayer(Module):
"""The actor NN base its output for the case of full CSI on a continuous relaxation of the problem. Specifically it gives
a value for every user. This layer will start allocating to the most valuable bw until no more resources are available for
the least valuable users
"""
def __init__(self, Resource):
super(AllocatingLayer, self).__init__()
self.W = Resource
def forward(self, values, weights):
batchSize, Kusers = values.shape
assert list(weights.size()) == [batchSize, Kusers] and (values >= 0
).all()
VperW_diff = values.unsqueeze(dim=1).detach() - values.unsqueeze(dim=2
).detach()
assert list(VperW_diff.shape) == [batchSize, Kusers, Kusers]
Better_j_than_i = 1.0 * (VperW_diff >= 0)
Satisfying_Constr = self.W - torch.matmul(Better_j_than_i, weights.
unsqueeze(dim=2)).squeeze() >= 0
assert list(Satisfying_Constr.shape) == [batchSize, Kusers]
return Satisfying_Constr * weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'Resource': 4}]
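def _allocation_demo():
    # Hedged sketch of the allocation semantics: with W = 4, a user keeps its
    # weight only if the total weight of all users at least as valuable
    # (itself included) fits within the resource budget W.
    layer = AllocatingLayer(Resource=4)
    values = torch.tensor([[0.9, 0.1], [0.5, 0.5]])
    weights = torch.tensor([[3.0, 2.0], [2.0, 2.0]])
    # Row 0: user 0 fits (3 <= 4); user 1 would need 3 + 2 > 4 and is dropped.
    # Row 1: equally valuable users share exactly the budget (2 + 2 <= 4).
    return layer(values, weights)  # tensor([[3., 0.], [2., 2.]])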
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn.modules.module import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_ge_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = tmp2 >= tmp3
tmp5 = tmp4.to(tl.float32)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + x4, tmp7, xmask)
@triton.jit
def triton_poi_fused_ge_mul_rsub_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = tmp2 >= tmp3
tmp5 = tmp4.to(tl.float32)
tmp7 = tmp5 * tmp6
tl.store(in_out_ptr0 + x0, tmp7, xmask)
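# Hedged note: the two kernels above fuse, respectively, the pairwise
# comparison matrix Better_j_than_i = 1.0 * (v_j - v_i >= 0) and the final
# budget check (W - allocated_weight >= 0) * weights, with the resource
# W = 4 baked in as the constant 4.0.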
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_ge_mul_sub_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(arg1_1, (4, 4, 1), (4,
1, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4), (4, 1), 0)
del buf1
triton_poi_fused_ge_mul_rsub_1[grid(16)](buf2, arg1_1, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del arg1_1
return buf2,
class AllocatingLayerNew(Module):
"""The actor NN base its output for the case of full CSI on a continuous relaxation of the problem. Specifically it gives
a value for every user. This layer will start allocating to the most valuable bw until no more resources are available for
the least valuable users
"""
def __init__(self, Resource):
super(AllocatingLayerNew, self).__init__()
self.W = Resource
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| avranasa/DRL_Scheduling_Communications | AllocatingLayer | false | 6,284 | [
"MIT"
] | 1 | 2e6cb3a9599e43b73547f4281d82b1e5999271b7 | https://github.com/avranasa/DRL_Scheduling_Communications/tree/2e6cb3a9599e43b73547f4281d82b1e5999271b7 | from torch.nn import Module
import torch
from torch.nn.modules.module import Module
class Model(Module):
"""The actor NN base its output for the case of full CSI on a continuous relaxation of the problem. Specifically it gives
a value for every user. This layer will start allocating to the most valuable bw until no more resources are available for
the least valuable users
"""
def __init__(self, Resource):
super().__init__()
self.W = Resource
def forward(self, values, weights):
batchSize, Kusers = values.shape
assert list(weights.size()) == [batchSize, Kusers] and (values >= 0
).all()
VperW_diff = values.unsqueeze(dim=1).detach() - values.unsqueeze(dim=2
).detach()
assert list(VperW_diff.shape) == [batchSize, Kusers, Kusers]
Better_j_than_i = 1.0 * (VperW_diff >= 0)
Satisfying_Constr = self.W - torch.matmul(Better_j_than_i, weights.
unsqueeze(dim=2)).squeeze() >= 0
assert list(Satisfying_Constr.shape) == [batchSize, Kusers]
return Satisfying_Constr * weights
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
TauSTE | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/te/cte662dz5uwvexkyn4i5k7zcpexn4oecbldxbr6yqmrjc6gzepr4.py
# Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy]
# Source node to ATen node mapping:
# float_1 => convert_element_type
# gt => gt
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0.0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
triton_poi_fused__to_copy_gt_0 = async_compile.triton('triton_poi_fused__to_copy_gt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_gt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, float_1], Original ATen: [aten.gt, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_gt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from typing import Any
import torch.nn.functional as F
class TauSTEFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: 'Any', tau_threshold: 'float', input: 'Any') ->Any:
return (input > tau_threshold).float()
@staticmethod
def backward(ctx: 'Any', grad_output: 'Any') ->Any:
return None, F.hardtanh(grad_output)
class TauSTE(Module):
def __init__(self, tau_threshold: 'float'=0.0) ->None:
super(TauSTE, self).__init__()
self.tau_threshold = tau_threshold
def forward(self, batch: 'torch.Tensor') ->torch.Tensor:
return TauSTEFunction.apply(self.tau_threshold, batch)
def extra_repr(self) ->str:
return 'tau_threshold={}'.format(self.tau_threshold)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
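def _ste_demo():
    # Hedged sketch: TauSTE is a hard threshold in the forward pass, but the
    # custom backward passes the (hardtanh-clipped) upstream gradient straight
    # through, so the module stays trainable despite the step function.
    x = torch.tensor([-0.5, 0.2, 3.0], requires_grad=True)
    y = TauSTE(tau_threshold=0.0)(x)  # tensor([0., 1., 1.])
    y.sum().backward()
    return x.grad  # hardtanh of ones -> tensor([1., 1., 1.])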
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from typing import Any
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_gt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = tmp2.to(tl.float32)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_gt_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TauSTEFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: 'Any', tau_threshold: 'float', input: 'Any') ->Any:
return (input > tau_threshold).float()
@staticmethod
def backward(ctx: 'Any', grad_output: 'Any') ->Any:
return None, F.hardtanh(grad_output)
class TauSTENew(Module):
def __init__(self, tau_threshold: 'float'=0.0) ->None:
super(TauSTENew, self).__init__()
self.tau_threshold = tau_threshold
def extra_repr(self) ->str:
return 'tau_threshold={}'.format(self.tau_threshold)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
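def _example_usage_tau_ste():
    # Hedged usage sketch (assumes a CUDA device): TauSTENew runs the
    # threshold through the compiled Triton kernel. Note the AOT graph is
    # inference-only, so the straight-through backward of TauSTE does not
    # apply here.
    return TauSTENew()(torch.rand(4, 4, 4, 4, device='cuda'))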
| atreyasha/spp-explainability | TauSTE | false | 6,285 | [
"MIT"
] | 1 | c959b837591cc1980d057a67f682e00b1f3e8e37 | https://github.com/atreyasha/spp-explainability/tree/c959b837591cc1980d057a67f682e00b1f3e8e37 | from torch.nn import Module
import torch
from typing import Any
import torch.nn.functional as F
class TauSTEFunction(torch.autograd.Function):
@staticmethod
def forward(ctx: 'Any', tau_threshold: 'float', input: 'Any') ->Any:
return (input > tau_threshold).float()
@staticmethod
def backward(ctx: 'Any', grad_output: 'Any') ->Any:
return None, F.hardtanh(grad_output)
class Model(Module):
def __init__(self, tau_threshold: 'float'=0.0) ->None:
super().__init__()
self.tau_threshold = tau_threshold
def forward(self, batch: 'torch.Tensor') ->torch.Tensor:
return TauSTEFunction.apply(self.tau_threshold, batch)
def extra_repr(self) ->str:
return 'tau_threshold={}'.format(self.tau_threshold)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
RoutingCapsules | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6d/c6dttqk6xutp77btpuo4ywspudms2jqt6a26lv6ujp22nczd4jrp.py
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# u_hat => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/my/cmy25ejia5o2rat776uwsbvvmsxs5ygvdojzg6lcj5wdlcpt6zvw.py
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# u_hat => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ax/caxtwwdhx4z7xnzczowzqzedsuimqfsdl4ojybtxh57mrvl7r62n.py
# Topologically Sorted Source Nodes: [b, c, mul, s, pow_1, norm], Original ATen: [aten.zeros_like, aten._softmax, aten.mul, aten.sum, aten.pow]
# Source node to ATen node mapping:
# b => full_default
# c => amax, div, exp, sub, sum_1
# mul => mul
# norm => sum_3
# pow_1 => pow_1
# s => sum_2
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%full_default, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_2), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2], True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 2), kwargs = {})
# %sum_3 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused__softmax_mul_pow_sum_zeros_like_2 = async_compile.triton('triton_poi_fused__softmax_mul_pow_sum_zeros_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_pow_sum_zeros_like_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_pow_sum_zeros_like_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp6 = tl.load(in_ptr0 + ((4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (64 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (128 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (192 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (1 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (65 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (129 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (193 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (2 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (66 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (130 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (194 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (3 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (67 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (131 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr0 + (195 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp0 = 0.0
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 + tmp1
tmp3 = tmp2 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp1 / tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp5 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp5 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp16 * tmp16
tmp19 = tmp5 * tmp18
tmp21 = tmp5 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp5 * tmp23
tmp25 = tmp22 + tmp24
tmp27 = tmp5 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp28 * tmp28
tmp30 = tmp17 + tmp29
tmp32 = tmp5 * tmp31
tmp34 = tmp5 * tmp33
tmp35 = tmp32 + tmp34
tmp37 = tmp5 * tmp36
tmp38 = tmp35 + tmp37
tmp40 = tmp5 * tmp39
tmp41 = tmp38 + tmp40
tmp42 = tmp41 * tmp41
tmp43 = tmp30 + tmp42
tmp45 = tmp5 * tmp44
tmp47 = tmp5 * tmp46
tmp48 = tmp45 + tmp47
tmp50 = tmp5 * tmp49
tmp51 = tmp48 + tmp50
tmp53 = tmp5 * tmp52
tmp54 = tmp51 + tmp53
tmp55 = tmp54 * tmp54
tmp56 = tmp43 + tmp55
tl.store(out_ptr0 + (x2), tmp56, xmask)
''', device_str='cuda')
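# Hedged note: because the routing logits b are initialized to zero, the first
# softmax over the 4 output capsules is uniform, so the kernel above hardcodes
# c = exp(0) / (4 * exp(0)) = 1/4 instead of loading b from memory.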
# kernel path: runs/run_shard_4/inductor_cache/ks/ckscyft52mffu7fo4m6ucqhkorymbxszfq2kpaapnmfzmse3koc3.py
# Topologically Sorted Source Nodes: [b, c, mul, s, add, truediv, mul_1, sqrt, add_1, x_1, c_1, mul_3, s_1], Original ATen: [aten.zeros_like, aten._softmax, aten.mul, aten.sum, aten.add, aten.div, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# b => full_default
# c => amax, div, exp, sub, sum_1
# c_1 => div_3, exp_1, sum_4
# mul => mul
# mul_1 => mul_1
# mul_3 => mul_3
# s => sum_2
# s_1 => sum_5
# sqrt => sqrt
# truediv => div_1
# x_1 => div_2
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 1, 4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%full_default, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_2), kwargs = {})
# %sum_2 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [2], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, 1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %add), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sum_2), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_3,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {})
# %div_2 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {})
# %scalar_tensor_default : [num_users=2] = call_function[target=torch.ops.aten.scalar_tensor.default](args = (1,), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %ge_scalar : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%div_2, 0), kwargs = {})
# %neg_default : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%scalar_tensor_default,), kwargs = {})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%ge_scalar, %scalar_tensor_default, %neg_default), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %where_self), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_self, %div_2), kwargs = {})
# %mul_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, %mul_tensor_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_2,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_4), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_3, %view_2), kwargs = {})
# %sum_5 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_3, [2], True), kwargs = {})
triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3 = async_compile.triton('triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x2 = (xindex // 64)
x3 = xindex % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (x3 + (256*x2)), xmask)
tmp12 = tl.load(in_ptr1 + (64 + x3 + (256*x2)), xmask)
tmp15 = tl.load(in_ptr1 + (128 + x3 + (256*x2)), xmask)
tmp18 = tl.load(in_ptr1 + (192 + x3 + (256*x2)), xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tmp0 / tmp2
tmp4 = 0.0
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 + tmp5
tmp7 = tmp6 + tmp5
tmp8 = tmp7 + tmp5
tmp9 = tmp5 / tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp9 * tmp12
tmp14 = tmp11 + tmp13
tmp16 = tmp9 * tmp15
tmp17 = tmp14 + tmp16
tmp19 = tmp9 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp3 * tmp20
tmp22 = libdevice.sqrt(tmp0)
tmp23 = 1e-08
tmp24 = tmp22 + tmp23
tmp25 = tmp21 / tmp24
tmp26 = tmp25 >= tmp4
tmp27 = -1.0
tmp28 = tl.where(tmp26, tmp1, tmp27)
tmp29 = tmp10 * tmp28
tmp30 = tmp12 * tmp28
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tmp32 = tmp15 * tmp28
tmp33 = triton_helpers.maximum(tmp31, tmp32)
tmp34 = tmp18 * tmp28
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp29 - tmp35
tmp37 = tmp28 * tmp25
tmp38 = tmp36 * tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp30 - tmp35
tmp41 = tmp40 * tmp37
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp39 + tmp42
tmp44 = tmp32 - tmp35
tmp45 = tmp44 * tmp37
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp43 + tmp46
tmp48 = tmp34 - tmp35
tmp49 = tmp48 * tmp37
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp47 + tmp50
tmp52 = tmp39 / tmp51
tmp53 = tmp52 * tmp10
tmp54 = tmp42 / tmp51
tmp55 = tmp54 * tmp12
tmp56 = tmp53 + tmp55
tmp57 = tmp46 / tmp51
tmp58 = tmp57 * tmp15
tmp59 = tmp56 + tmp58
tmp60 = tmp50 / tmp51
tmp61 = tmp60 * tmp18
tmp62 = tmp59 + tmp61
tl.store(out_ptr0 + (x5), tmp25, xmask)
tl.store(in_out_ptr0 + (x5), tmp62, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pw/cpwf6totvtlynglzhnop5czfqitxnvzindzzqixm2gmwwmxfsc6v.py
# Topologically Sorted Source Nodes: [b_1, pow_2, norm_1, add_3, truediv_2, mul_4, sqrt_1, add_4, x_2, uv_1, b_2, c_2, mul_6, s_2], Original ATen: [aten.add, aten.pow, aten.sum, aten.div, aten.mul, aten.sqrt, aten._softmax]
# Source node to ATen node mapping:
# add_3 => add_3
# add_4 => add_4
# b_1 => mul_2
# b_2 => add_5
# c_2 => amax_2, div_6, exp_2, sub_2, sum_7
# mul_4 => mul_4
# mul_6 => mul_6
# norm_1 => sum_6
# pow_2 => pow_2
# s_2 => sum_8
# sqrt_1 => sqrt_1
# truediv_2 => div_4
# uv_1 => mul_5
# x_2 => div_5
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %div_2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_5, 2), kwargs = {})
# %sum_6 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [-1], True), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_6, 1), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_6, %add_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_4, %sum_5), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_6,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-08), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_4, %add_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %div_5), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_5), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_5, [2], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [2], True), kwargs = {})
# %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_7), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_6, %view_2), kwargs = {})
# %sum_8 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_6, [2], True), kwargs = {})
triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4 = async_compile.triton('triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x4 = xindex
x2 = xindex % 64
x3 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (x4), xmask)
tmp20 = tl.load(in_ptr1 + (x2 + (256*x3)), xmask)
tmp21 = tl.load(in_ptr2 + (x4), xmask)
tmp25 = tl.load(in_ptr1 + (64 + x2 + (256*x3)), xmask)
tmp30 = tl.load(in_ptr1 + (128 + x2 + (256*x3)), xmask)
tmp35 = tl.load(in_ptr1 + (192 + x2 + (256*x3)), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tmp22 = tmp20 * tmp21
tmp23 = tmp20 * tmp19
tmp24 = tmp22 + tmp23
tmp26 = tmp25 * tmp21
tmp27 = tmp25 * tmp19
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp24, tmp28)
tmp31 = tmp30 * tmp21
tmp32 = tmp30 * tmp19
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp29, tmp33)
tmp36 = tmp35 * tmp21
tmp37 = tmp35 * tmp19
tmp38 = tmp36 + tmp37
tmp39 = triton_helpers.maximum(tmp34, tmp38)
tmp40 = tmp24 - tmp39
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp28 - tmp39
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tmp45 = tmp33 - tmp39
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp44 + tmp46
tmp48 = tmp38 - tmp39
tmp49 = tl_math.exp(tmp48)
tmp50 = tmp47 + tmp49
tmp51 = tmp41 / tmp50
tmp52 = tmp51 * tmp20
tmp53 = tmp43 / tmp50
tmp54 = tmp53 * tmp25
tmp55 = tmp52 + tmp54
tmp56 = tmp46 / tmp50
tmp57 = tmp56 * tmp30
tmp58 = tmp55 + tmp57
tmp59 = tmp49 / tmp50
tmp60 = tmp59 * tmp35
tmp61 = tmp58 + tmp60
tl.store(in_out_ptr0 + (x4), tmp61, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vx/cvxq4muzevaqey3caqsmo4qq5qc526ua43ko3bs5mugstm6oxtxh.py
# Topologically Sorted Source Nodes: [pow_3, norm_2, add_6, truediv_4, mul_7, sqrt_2, add_7, x_3], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt]
# Source node to ATen node mapping:
# add_6 => add_6
# add_7 => add_7
# mul_7 => mul_7
# norm_2 => sum_9
# pow_3 => pow_3
# sqrt_2 => sqrt_2
# truediv_4 => div_7
# x_3 => div_8
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_8, 2), kwargs = {})
# %sum_9 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, 1), kwargs = {})
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_9, %add_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_7, %sum_8), kwargs = {})
# %sqrt_2 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_9,), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_2, 1e-08), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_7, %add_7), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_5 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(primals_2, buf1, 1024, grid=grid(1024), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [u_hat], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 1, 1, 4, 4, 1), (16, 64, 64, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [b, c, mul, s, pow_1, norm], Original ATen: [aten.zeros_like, aten._softmax, aten.mul, aten.sum, aten.pow]
triton_poi_fused__softmax_mul_pow_sum_zeros_like_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 1, 1, 4, 4, 4), (64, 256, 256, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 1, 4, 4, 4), (64, 256, 256, 16, 4, 1), torch.float32)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [b, c, mul, s, add, truediv, mul_1, sqrt, add_1, x_1, c_1, mul_3, s_1], Original ATen: [aten.zeros_like, aten._softmax, aten.mul, aten.sum, aten.add, aten.div, aten.sqrt]
triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3.run(buf7, buf3, buf2, buf4, 256, grid=grid(256), stream=stream0)
del buf3
buf10 = empty_strided_cuda((4, 1, 1, 4, 4, 4), (64, 256, 256, 16, 4, 1), torch.float32)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [b_1, pow_2, norm_1, add_3, truediv_2, mul_4, sqrt_1, add_4, x_2, uv_1, b_2, c_2, mul_6, s_2], Original ATen: [aten.add, aten.pow, aten.sum, aten.div, aten.mul, aten.sqrt, aten._softmax]
triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4.run(buf11, buf7, buf2, buf4, 256, grid=grid(256), stream=stream0)
del buf4
buf12 = reinterpret_tensor(buf7, (4, 1, 1, 4, 4, 4), (64, 64, 64, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [pow_3, norm_2, add_6, truediv_4, mul_7, sqrt_2, add_7, x_3], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt]
triton_poi_fused_add_div_mul_pow_sqrt_sum_5.run(buf11, buf12, 256, grid=grid(256), stream=stream0)
del buf11
return (reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, reinterpret_tensor(buf0, (64, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class RoutingCapsules(nn.Module):
"""
input
capsules_num: new feature, num to duplicate
capsules_dim: new feature, each capsules' dim
in_capsules_num: last layer's capsules_num
in_capsules_dim: last layer's capsules_dim
"""
def __init__(self, capsules_num, capsules_dim, in_capsules_num,
in_capsules_dim, num_iterations=3):
super(RoutingCapsules, self).__init__()
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.in_capsules_num = in_capsules_num
self.in_capsules_dim = in_capsules_dim
self.num_iterations = num_iterations
self.W = nn.Parameter(torch.randn(1, capsules_num, in_capsules_num,
in_capsules_dim, capsules_dim))
def forward(self, x):
        x.shape[0]  # no-op: the batch size is computed but never used
x = x.unsqueeze(1).unsqueeze(3)
u_hat = x @ self.W
b = torch.zeros_like(u_hat)
for i in range(self.num_iterations - 1):
"""
            Softmax is applied over all input capsules to compute the routing probabilities.
"""
c = F.softmax(b, dim=2)
s = (c * u_hat).sum(dim=2, keepdim=True)
v = squash(s)
uv = u_hat * v
b = b + uv
c = F.softmax(b, dim=2)
s = (c * u_hat).sum(dim=2, keepdim=True)
v = squash(s)
v = v.squeeze()
return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'capsules_num': 4, 'capsules_dim': 4, 'in_capsules_num': 4,
'in_capsules_dim': 4}]
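A quick sketch of the broadcasting behind u_hat = x @ self.W above, assuming the canonical 3-D capsule input (N, in_num, in_dim) described in the docstring; the concrete sizes are made up for illustration:

import torch

N, caps_num, in_num, in_dim, caps_dim = 2, 4, 3, 8, 16
x = torch.randn(N, in_num, in_dim)
W = torch.randn(1, caps_num, in_num, in_dim, caps_dim)
xe = x.unsqueeze(1).unsqueeze(3)   # (N, 1, in_num, 1, in_dim)
u_hat = xe @ W                     # batched matmul broadcasts to (N, caps_num, in_num, 1, caps_dim)
print(u_hat.shape)                 # torch.Size([2, 4, 3, 1, 16])

dim=2 of u_hat is the input-capsule axis, which is why the routing loop above applies F.softmax(b, dim=2).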
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_pow_sum_zeros_like_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp6 = tl.load(in_ptr0 + (4 * x0 + 256 * x1), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (64 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (128 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (192 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (1 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (65 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (129 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp26 = tl.load(in_ptr0 + (193 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (2 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp33 = tl.load(in_ptr0 + (66 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp36 = tl.load(in_ptr0 + (130 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp39 = tl.load(in_ptr0 + (194 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp44 = tl.load(in_ptr0 + (3 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (67 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (131 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp52 = tl.load(in_ptr0 + (195 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp0 = 0.0
tmp1 = tl_math.exp(tmp0)
tmp2 = tmp1 + tmp1
tmp3 = tmp2 + tmp1
tmp4 = tmp3 + tmp1
tmp5 = tmp1 / tmp4
tmp7 = tmp5 * tmp6
tmp9 = tmp5 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp5 * tmp11
tmp13 = tmp10 + tmp12
tmp15 = tmp5 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp16 * tmp16
tmp19 = tmp5 * tmp18
tmp21 = tmp5 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp5 * tmp23
tmp25 = tmp22 + tmp24
tmp27 = tmp5 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp28 * tmp28
tmp30 = tmp17 + tmp29
tmp32 = tmp5 * tmp31
tmp34 = tmp5 * tmp33
tmp35 = tmp32 + tmp34
tmp37 = tmp5 * tmp36
tmp38 = tmp35 + tmp37
tmp40 = tmp5 * tmp39
tmp41 = tmp38 + tmp40
tmp42 = tmp41 * tmp41
tmp43 = tmp30 + tmp42
tmp45 = tmp5 * tmp44
tmp47 = tmp5 * tmp46
tmp48 = tmp45 + tmp47
tmp50 = tmp5 * tmp49
tmp51 = tmp48 + tmp50
tmp53 = tmp5 * tmp52
tmp54 = tmp51 + tmp53
tmp55 = tmp54 * tmp54
tmp56 = tmp43 + tmp55
tl.store(out_ptr0 + x2, tmp56, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x2 = xindex // 64
x3 = xindex % 64
x5 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (x3 + 256 * x2), xmask)
tmp12 = tl.load(in_ptr1 + (64 + x3 + 256 * x2), xmask)
tmp15 = tl.load(in_ptr1 + (128 + x3 + 256 * x2), xmask)
tmp18 = tl.load(in_ptr1 + (192 + x3 + 256 * x2), xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = tmp0 / tmp2
tmp4 = 0.0
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 + tmp5
tmp7 = tmp6 + tmp5
tmp8 = tmp7 + tmp5
tmp9 = tmp5 / tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp9 * tmp12
tmp14 = tmp11 + tmp13
tmp16 = tmp9 * tmp15
tmp17 = tmp14 + tmp16
tmp19 = tmp9 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp3 * tmp20
tmp22 = libdevice.sqrt(tmp0)
tmp23 = 1e-08
tmp24 = tmp22 + tmp23
tmp25 = tmp21 / tmp24
tmp26 = tmp25 >= tmp4
tmp27 = -1.0
tmp28 = tl.where(tmp26, tmp1, tmp27)
tmp29 = tmp10 * tmp28
tmp30 = tmp12 * tmp28
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tmp32 = tmp15 * tmp28
tmp33 = triton_helpers.maximum(tmp31, tmp32)
tmp34 = tmp18 * tmp28
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp29 - tmp35
tmp37 = tmp28 * tmp25
tmp38 = tmp36 * tmp37
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp30 - tmp35
tmp41 = tmp40 * tmp37
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp39 + tmp42
tmp44 = tmp32 - tmp35
tmp45 = tmp44 * tmp37
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp43 + tmp46
tmp48 = tmp34 - tmp35
tmp49 = tmp48 * tmp37
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp47 + tmp50
tmp52 = tmp39 / tmp51
tmp53 = tmp52 * tmp10
tmp54 = tmp42 / tmp51
tmp55 = tmp54 * tmp12
tmp56 = tmp53 + tmp55
tmp57 = tmp46 / tmp51
tmp58 = tmp57 * tmp15
tmp59 = tmp56 + tmp58
tmp60 = tmp50 / tmp51
tmp61 = tmp60 * tmp18
tmp62 = tmp59 + tmp61
tl.store(out_ptr0 + x5, tmp25, xmask)
tl.store(in_out_ptr0 + x5, tmp62, xmask)
@triton.jit
def triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x4 = xindex
x2 = xindex % 64
x3 = xindex // 64
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x4, xmask)
tmp20 = tl.load(in_ptr1 + (x2 + 256 * x3), xmask)
tmp21 = tl.load(in_ptr2 + x4, xmask)
tmp25 = tl.load(in_ptr1 + (64 + x2 + 256 * x3), xmask)
tmp30 = tl.load(in_ptr1 + (128 + x2 + 256 * x3), xmask)
tmp35 = tl.load(in_ptr1 + (192 + x2 + 256 * x3), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tmp22 = tmp20 * tmp21
tmp23 = tmp20 * tmp19
tmp24 = tmp22 + tmp23
tmp26 = tmp25 * tmp21
tmp27 = tmp25 * tmp19
tmp28 = tmp26 + tmp27
tmp29 = triton_helpers.maximum(tmp24, tmp28)
tmp31 = tmp30 * tmp21
tmp32 = tmp30 * tmp19
tmp33 = tmp31 + tmp32
tmp34 = triton_helpers.maximum(tmp29, tmp33)
tmp36 = tmp35 * tmp21
tmp37 = tmp35 * tmp19
tmp38 = tmp36 + tmp37
tmp39 = triton_helpers.maximum(tmp34, tmp38)
tmp40 = tmp24 - tmp39
tmp41 = tl_math.exp(tmp40)
tmp42 = tmp28 - tmp39
tmp43 = tl_math.exp(tmp42)
tmp44 = tmp41 + tmp43
tmp45 = tmp33 - tmp39
tmp46 = tl_math.exp(tmp45)
tmp47 = tmp44 + tmp46
tmp48 = tmp38 - tmp39
tmp49 = tl_math.exp(tmp48)
tmp50 = tmp47 + tmp49
tmp51 = tmp41 / tmp50
tmp52 = tmp51 * tmp20
tmp53 = tmp43 / tmp50
tmp54 = tmp53 * tmp25
tmp55 = tmp52 + tmp54
tmp56 = tmp46 / tmp50
tmp57 = tmp56 * tmp30
tmp58 = tmp55 + tmp57
tmp59 = tmp49 / tmp50
tmp60 = tmp59 * tmp35
tmp61 = tmp58 + tmp60
tl.store(in_out_ptr0 + x4, tmp61, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_5(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(1024)](primals_1, buf0, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 4, 4, 4), (256, 256, 64, 16, 4,
1), torch.float32)
triton_poi_fused_clone_1[grid(1024)](primals_2, buf1, 1024, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (64, 4, 4), (16, 4, 1), 0), out=buf2)
del buf1
buf3 = empty_strided_cuda((4, 1, 1, 4, 4, 1), (16, 64, 64, 4, 1, 64
), torch.float32)
triton_poi_fused__softmax_mul_pow_sum_zeros_like_2[grid(64)](buf2,
buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 1, 1, 4, 4, 4), (64, 256, 256, 16, 4,
1), torch.float32)
buf6 = empty_strided_cuda((4, 1, 1, 4, 4, 4), (64, 256, 256, 16, 4,
1), torch.float32)
buf7 = buf6
del buf6
triton_poi_fused__softmax_add_div_mul_sqrt_sum_zeros_like_3[grid(256)](
buf7, buf3, buf2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
buf10 = empty_strided_cuda((4, 1, 1, 4, 4, 4), (64, 256, 256, 16, 4,
1), torch.float32)
buf11 = buf10
del buf10
triton_poi_fused__softmax_add_div_mul_pow_sqrt_sum_4[grid(256)](buf11,
buf7, buf2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
buf12 = reinterpret_tensor(buf7, (4, 1, 1, 4, 4, 4), (64, 64, 64,
16, 4, 1), 0)
del buf7
triton_poi_fused_add_div_mul_pow_sqrt_sum_5[grid(256)](buf11, buf12,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf11
return reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf2, reinterpret_tensor(buf0, (64, 4, 4), (16, 1, 4), 0)
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class RoutingCapsulesNew(nn.Module):
"""
input
capsules_num: new feature, num to duplicate
capsules_dim: new feature, each capsules' dim
in_capsules_num: last layer's capsules_num
in_capsules_dim: last layer's capsules_dim
"""
def __init__(self, capsules_num, capsules_dim, in_capsules_num,
in_capsules_dim, num_iterations=3):
super(RoutingCapsulesNew, self).__init__()
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.in_capsules_num = in_capsules_num
self.in_capsules_dim = in_capsules_dim
self.num_iterations = num_iterations
self.W = nn.Parameter(torch.randn(1, capsules_num, in_capsules_num,
in_capsules_dim, capsules_dim))
def forward(self, input_0):
primals_2 = self.W
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
 | ashawkey/CapsNet.pytorch | RoutingCapsules | false | 6,286 | ["MIT"] | 1 | 3b796b572bbabe79cc445c35913cd3584733aedf | https://github.com/ashawkey/CapsNet.pytorch/tree/3b796b572bbabe79cc445c35913cd3584733aedf | import torch
import torch.nn as nn
import torch.nn.functional as F
def squash(x, dim=-1, epsilon=1e-08):
norm = (x ** 2).sum(dim=dim, keepdim=True)
x = norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)
return x
class Model(nn.Module):
"""
input
capsules_num: new feature, num to duplicate
capsules_dim: new feature, each capsules' dim
in_capsules_num: last layer's capsules_num
in_capsules_dim: last layer's capsules_dim
"""
def __init__(self, capsules_num, capsules_dim, in_capsules_num,
in_capsules_dim, num_iterations=3):
super().__init__()
self.capsules_num = capsules_num
self.capsules_dim = capsules_dim
self.in_capsules_num = in_capsules_num
self.in_capsules_dim = in_capsules_dim
self.num_iterations = num_iterations
self.W = nn.Parameter(torch.randn(1, capsules_num, in_capsules_num,
in_capsules_dim, capsules_dim))
def forward(self, x):
        x.shape[0]  # no-op: the batch size is computed but never used
x = x.unsqueeze(1).unsqueeze(3)
u_hat = x @ self.W
b = torch.zeros_like(u_hat)
for i in range(self.num_iterations - 1):
"""
            Softmax is applied over all input capsules to compute the routing probabilities.
"""
c = F.softmax(b, dim=2)
s = (c * u_hat).sum(dim=2, keepdim=True)
v = squash(s)
uv = u_hat * v
b = b + uv
c = F.softmax(b, dim=2)
s = (c * u_hat).sum(dim=2, keepdim=True)
v = squash(s)
v = v.squeeze()
return v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'capsules_num': 4, 'capsules_dim': 4, 'in_capsules_num': 4,
'in_capsules_dim': 4}]
|
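A standalone numeric check of the squash nonlinearity from the row above (a sketch: the vectors are arbitrary, and only the squash definition already shown is assumed). squash keeps a vector's direction but rescales its length to roughly s / (s + 1), where s is the squared norm, so outputs always have norm in [0, 1):

import torch

def squash(x, dim=-1, epsilon=1e-08):
    # same definition as in the row above
    norm = (x ** 2).sum(dim=dim, keepdim=True)
    return norm / (norm + 1) * x / (torch.sqrt(norm) + epsilon)

long_vec = torch.tensor([[3.0, 4.0]])   # squared norm 25
short_vec = torch.tensor([[0.3, 0.4]])  # squared norm 0.25
print(squash(long_vec).norm(dim=-1))    # ~0.9615 (= 25/26): long vectors approach unit length
print(squash(short_vec).norm(dim=-1))   # ~0.2000 (= 0.25/1.25): short vectors are suppressed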
MaxMarginRankingLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7q/c7qo7xkoynuj5iw5frdzk4ubwdvllwhmpivznfp5teeegjzck2py.py
# Topologically Sorted Source Nodes: [x2_1, sub, sub_1, max_margin, mean], Original ATen: [aten.cat, aten.sub, aten.rsub, aten.relu, aten.mean]
# Source node to ATen node mapping:
# max_margin => relu
# mean => mean
# sub => sub
# sub_1 => sub_1
# x2_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_2, %view_3],), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %cat), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sub), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%relu,), kwargs = {})
triton_per_fused_cat_mean_relu_rsub_sub_0 = async_compile.triton('triton_per_fused_cat_mean_relu_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cat_mean_relu_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cat_mean_relu_rsub_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 32
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (5*((r0 % 16) // 4)), None, eviction_policy='evict_last')
tmp1 = r0
tmp2 = tl.full([1, 1], 0, tl.int64)
tmp3 = tmp1 >= tmp2
tmp4 = tl.full([1, 1], 16, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tmp8 = tl.full([1, 1], 32, tl.int64)
tmp9 = tmp1 < tmp8
tmp10 = tl.load(in_ptr0 + (tl.broadcast_to((4*(((-16) + r0) % 4)) + ((((-16) + r0) // 4) % 4), [XBLOCK, RBLOCK])), tmp7, eviction_policy='evict_last', other=0.0)
tmp11 = tl.where(tmp5, tmp6, tmp10)
tmp12 = tmp0 - tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tl.full([1, 1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = 32.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x2_1, sub, sub_1, max_margin, mean], Original ATen: [aten.cat, aten.sub, aten.rsub, aten.relu, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_cat_mean_relu_rsub_sub_0.run(buf1, arg0_1, 1, 32, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch as th
import torch.optim
import torch.utils.data
class MaxMarginRankingLoss(nn.Module):
def __init__(self, margin=1):
super(MaxMarginRankingLoss, self).__init__()
        self.loss = th.nn.MarginRankingLoss(margin)  # unused attribute; never referenced in forward
self.margin = margin
def forward(self, x):
n = x.size()[0]
x1 = th.diag(x)
x1 = x1.unsqueeze(1)
x1 = x1.expand(n, n)
x1 = x1.contiguous().view(-1, 1)
x1 = th.cat((x1, x1), 0)
x2 = x.view(-1, 1)
x3 = x.transpose(0, 1).contiguous().view(-1, 1)
x2 = th.cat((x2, x3), 0)
max_margin = F.relu(self.margin - (x1 - x2))
return max_margin.mean()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch as th
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_cat_mean_relu_rsub_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 5 * (r0 % 16 // 4), None, eviction_policy=
'evict_last')
tmp1 = r0
tl.full([1, 1], 0, tl.int64)
tmp4 = tl.full([1, 1], 16, tl.int64)
tmp5 = tmp1 < tmp4
tmp6 = tl.load(in_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp1 >= tmp4
tl.full([1, 1], 32, tl.int64)
tmp10 = tl.load(in_ptr0 + tl.broadcast_to(4 * ((-16 + r0) % 4) + (-16 +
r0) // 4 % 4, [XBLOCK, RBLOCK]), tmp7, eviction_policy='evict_last',
other=0.0)
tmp11 = tl.where(tmp5, tmp6, tmp10)
tmp12 = tmp0 - tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tl.full([1, 1], 0, tl.int32)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK])
tmp19 = tl.sum(tmp17, 1)[:, None]
tmp20 = 32.0
tmp21 = tmp19 / tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_cat_mean_relu_rsub_sub_0[grid(1)](buf1, arg0_1, 1,
32, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class MaxMarginRankingLossNew(nn.Module):
def __init__(self, margin=1):
super(MaxMarginRankingLossNew, self).__init__()
        self.loss = th.nn.MarginRankingLoss(margin)  # unused attribute; never referenced in forward
self.margin = margin
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
 | awesome-archive/Video-to-Online-Platform | MaxMarginRankingLoss | false | 6,287 | ["Apache-2.0"] | 1 | 4f91724133a817e79bce91e0abbd46cf38a31167 | https://github.com/awesome-archive/Video-to-Online-Platform/tree/4f91724133a817e79bce91e0abbd46cf38a31167 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch as th
import torch.optim
import torch.utils.data
class Model(nn.Module):
def __init__(self, margin=1):
super().__init__()
self.loss = th.nn.MarginRankingLoss(margin)
self.margin = margin
def forward(self, x):
n = x.size()[0]
x1 = th.diag(x)
x1 = x1.unsqueeze(1)
x1 = x1.expand(n, n)
x1 = x1.contiguous().view(-1, 1)
x1 = th.cat((x1, x1), 0)
x2 = x.view(-1, 1)
x3 = x.transpose(0, 1).contiguous().view(-1, 1)
x2 = th.cat((x2, x3), 0)
max_margin = F.relu(self.margin - (x1 - x2))
return max_margin.mean()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
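A concrete run of the max-margin ranking loss from the row above (a sketch; the 2x2 similarity matrix is made up). It replays the forward pass step by step; note that the diagonal comparisons, where a positive score is ranked against itself, always contribute the full margin of 1, so this loss cannot reach zero:

import torch
import torch.nn.functional as F

x = torch.tensor([[0.9, 0.1],
                  [0.2, 0.8]])                         # similarities, positives on the diagonal
n = x.size(0)
x1 = torch.diag(x).unsqueeze(1).expand(n, n).contiguous().view(-1, 1)
x1 = torch.cat((x1, x1), 0)                            # positive scores, once per row pass, once per column pass
x2 = torch.cat((x.view(-1, 1),
                x.t().contiguous().view(-1, 1)), 0)    # all scores, row-major then column-major
loss = F.relu(1.0 - (x1 - x2)).mean()
print(loss)  # tensor(0.6500): mean of [1.0, 0.2, 0.4, 1.0, 1.0, 0.3, 0.3, 1.0]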
WingLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5y/c5yx2tmktzc5brhfqiwvzfmmrhkpiscn4gi6kct3s3fnisnd7xlo.py
# Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, sub_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum]
# Source node to ATen node mapping:
# add => add
# delta => abs_1
# log => log
# losses => where
# lt => lt
# mul => mul
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 10.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 10.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, -7.91759469228055), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %mul, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1, 2]), kwargs = {})
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 10.0
tmp5 = tmp3 < tmp4
tmp6 = 0.5
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tmp10 * tmp4
tmp12 = -7.91759469228055
tmp13 = tmp3 - tmp12
tmp14 = tl.where(tmp5, tmp11, tmp13)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yi/cyivqxc32riwmi4nsqtijcqefjxyivwafk4qjnscryb6cwpfnqim.py
# Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => mean
# mul_1 => mul_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sum_1, [0]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_poi_fused_mean_mul_1 = async_compile.triton('triton_poi_fused_mean_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, sub_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0.run(arg0_1, arg1_1, buf0, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul]
triton_poi_fused_mean_mul_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class WingLoss(nn.Module):
"""Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation
with Convolutional Neural Networks' Feng et al. CVPR'2018.
Args:
omega (float): Also referred to as width.
epsilon (float): Also referred to as curvature.
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False,
loss_weight=1.0):
super().__init__()
self.omega = omega
self.epsilon = epsilon
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon))
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
pred (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
"""
delta = (target - pred).abs()
losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 +
delta / self.epsilon), delta - self.C)
return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0)
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N,K,D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 10.0
tmp5 = tmp3 < tmp4
tmp6 = 0.5
tmp7 = tmp3 * tmp6
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tl_math.log(tmp9)
tmp11 = tmp10 * tmp4
tmp12 = -7.91759469228055
tmp13 = tmp3 - tmp12
tmp14 = tl.where(tmp5, tmp11, tmp13)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)](
arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf0
return buf1,
class WingLossNew(nn.Module):
"""Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation
with Convolutional Neural Networks' Feng et al. CVPR'2018.
Args:
omega (float): Also referred to as width.
epsilon (float): Also referred to as curvature.
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False,
loss_weight=1.0):
super().__init__()
self.omega = omega
self.epsilon = epsilon
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon))
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
pred (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
"""
delta = (target - pred).abs()
losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 +
delta / self.epsilon), delta - self.C)
return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | atoaiari/mmpose | WingLoss | false | 6,288 | ["Apache-2.0"] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import math
import torch
import torch.nn as nn
class Model(nn.Module):
"""Wing Loss. paper ref: 'Wing Loss for Robust Facial Landmark Localisation
with Convolutional Neural Networks' Feng et al. CVPR'2018.
Args:
omega (float): Also referred to as width.
epsilon (float): Also referred to as curvature.
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, omega=10.0, epsilon=2.0, use_target_weight=False,
loss_weight=1.0):
super().__init__()
self.omega = omega
self.epsilon = epsilon
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
self.C = self.omega * (1.0 - math.log(1.0 + self.omega / self.epsilon))
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
pred (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
"""
delta = (target - pred).abs()
losses = torch.where(delta < self.omega, self.omega * torch.log(1.0 +
delta / self.epsilon), delta - self.C)
return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0)
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
- batch_size: N
- num_keypoints: K
- dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N,K,D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
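A quick numeric check of the WingLoss constant C from the row above (a sketch using only the module's default omega and epsilon). It confirms that C equals the -7.91759469228055 hard-coded as tmp12 in the fused kernel, and that C is exactly what makes the two branches of the loss meet at delta == omega:

import math

omega, epsilon = 10.0, 2.0
C = omega * (1.0 - math.log(1.0 + omega / epsilon))
print(C)                                              # ~-7.91759469228055, the kernel's tmp12
log_branch = omega * math.log(1.0 + omega / epsilon)  # wing branch evaluated at delta == omega
lin_branch = omega - C                                # linear branch evaluated at delta == omega
print(log_branch, lin_branch)                         # both ~17.9176, so the loss is continuous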
SoftWingLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/un/cunwpec7dtp6lckksegntrqkwzophx7kdx4optoqpzwrrovam74k.py
# Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, add_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# delta => abs_1
# log => log
# losses => where
# lt => lt
# mul => mul
# sub => sub
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 2.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1.0), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 20.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, -30.188758248682007), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %abs_1, %add_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1, 2]), kwargs = {})
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + (4*r2) + (64*x1)), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 2.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp4
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tl_math.log(tmp8)
tmp10 = 20.0
tmp11 = tmp9 * tmp10
tmp12 = -30.188758248682007
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp5, tmp3, tmp13)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yi/cyivqxc32riwmi4nsqtijcqefjxyivwafk4qjnscryb6cwpfnqim.py
# Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => mean
# mul_1 => mul_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%sum_1, [0]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_poi_fused_mean_mul_1 = async_compile.triton('triton_poi_fused_mean_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, delta, lt, truediv, add, log, mul, add_1, losses, sum_1], Original ATen: [aten.sub, aten.abs, aten.lt, aten.div, aten.add, aten.log, aten.mul, aten.where, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0.run(arg0_1, arg1_1, buf0, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [loss, mul_1], Original ATen: [aten.mean, aten.mul]
triton_poi_fused_mean_mul_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class SoftWingLoss(nn.Module):
"""Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face
Alignment' Lin et al. TIP'2021.
loss =
1. |x| , if |x| < omega1
2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1
Args:
omega1 (float): The first threshold.
omega2 (float): The second threshold.
epsilon (float): Also referred to as curvature.
        use_target_weight (bool): Option to use weighted loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5,
use_target_weight=False, loss_weight=1.0):
super().__init__()
self.omega1 = omega1
self.omega2 = omega2
self.epsilon = epsilon
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 /
self.epsilon)
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
batch_size: N
num_keypoints: K
dimension of keypoints: D (D=2 or D=3)
Args:
pred (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
"""
delta = (target - pred).abs()
losses = torch.where(delta < self.omega1, delta, self.omega2 *
torch.log(1.0 + delta / self.epsilon) + self.B)
return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0)
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
batch_size: N
num_keypoints: K
dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N, K, D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
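if __name__ == '__main__':
    # Editor's sketch, not part of the original module: B is defined so the two
    # branches of the loss meet at |x| = omega1, keeping the loss continuous.
    _loss = SoftWingLoss()
    _branch2 = _loss.omega2 * math.log(1.0 + _loss.omega1 / _loss.epsilon) + _loss.B
    assert abs(_branch2 - _loss.omega1) < 1e-06
    print(_loss(torch.rand(2, 17, 2), torch.rand(2, 17, 2)))  # assumed [N, K, D] shapes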
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0(in_ptr0,
in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4
x1 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 64 * x1), xmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 2.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp4
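    # 2.0 serves two roles here: omega1 (the branch threshold in tmp5) and
    # 1/epsilon (delta / 0.5 == delta * 2.0), so the compiler reuses one constant.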
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tl_math.log(tmp8)
tmp10 = 20.0
tmp11 = tmp9 * tmp10
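    # -30.188758248682007 is B = omega1 - omega2 * ln(1 + omega1 / epsilon)
    # = 2.0 - 20.0 * ln(5.0), folded in as a constant at trace time.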
tmp12 = -30.188758248682007
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp5, tmp3, tmp13)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tl.store(out_ptr0 + x3, tmp18, xmask)
@triton.jit
def triton_poi_fused_mean_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0), xmask)
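    # Average the four per-sample sums from buf0 (mean over the batch dim),
    # then multiply by loss_weight (1.0 here), fused into one pass.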
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mul_sub_sum_where_0[grid(16)](
arg0_1, arg1_1, buf0, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mean_mul_1[grid(4)](buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del buf0
return buf1,
class SoftWingLossNew(nn.Module):
"""Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face
Alignment' Lin et al. TIP'2021.
loss =
1. |x| , if |x| < omega1
2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1
Args:
omega1 (float): The first threshold.
omega2 (float): The second threshold.
epsilon (float): Also referred to as curvature.
        use_target_weight (bool): Option to use weighted loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5,
use_target_weight=False, loss_weight=1.0):
super().__init__()
self.omega1 = omega1
self.omega2 = omega2
self.epsilon = epsilon
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 /
self.epsilon)
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
batch_size: N
num_keypoints: K
dimension of keypoints: D (D=2 or D=3)
Args:
pred (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
"""
delta = (target - pred).abs()
losses = torch.where(delta < self.omega1, delta, self.omega2 *
torch.log(1.0 + delta / self.epsilon) + self.B)
return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| atoaiari/mmpose | SoftWingLoss | false | 6,289 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import math
import torch
import torch.nn as nn
class Model(nn.Module):
"""Soft Wing Loss 'Structure-Coherent Deep Feature Learning for Robust Face
Alignment' Lin et al. TIP'2021.
loss =
1. |x| , if |x| < omega1
2. omega2*ln(1+|x|/epsilon) + B, if |x| >= omega1
Args:
omega1 (float): The first threshold.
omega2 (float): The second threshold.
epsilon (float): Also referred to as curvature.
        use_target_weight (bool): Option to use weighted loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, omega1=2.0, omega2=20.0, epsilon=0.5,
use_target_weight=False, loss_weight=1.0):
super().__init__()
self.omega1 = omega1
self.omega2 = omega2
self.epsilon = epsilon
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
self.B = self.omega1 - self.omega2 * math.log(1.0 + self.omega1 /
self.epsilon)
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
batch_size: N
num_keypoints: K
dimension of keypoints: D (D=2 or D=3)
Args:
pred (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
"""
delta = (target - pred).abs()
losses = torch.where(delta < self.omega1, delta, self.omega2 *
torch.log(1.0 + delta / self.epsilon) + self.B)
return torch.mean(torch.sum(losses, dim=[1, 2]), dim=0)
def forward(self, output, target, target_weight=None):
"""Forward function.
Note:
batch_size: N
num_keypoints: K
dimension of keypoints: D (D=2 or D=3)
Args:
output (torch.Tensor[N, K, D]): Output regression.
target (torch.Tensor[N, K, D]): Target regression.
target_weight (torch.Tensor[N, K, D]):
Weights across different joint types.
"""
if self.use_target_weight:
assert target_weight is not None
loss = self.criterion(output * target_weight, target *
target_weight)
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SSLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4q/c4qe4xzbzzs2dza2syihwn2y5k2nsn5obxx72cvuq46uot6eirkk.py
# Topologically Sorted Source Nodes: [sub_1, squared_error, mul, bg_onehot], Original ATen: [aten.sub, aten.pow, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# bg_onehot => sub
# mul => mul
# squared_error => pow_1
# sub_1 => sub_1
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
triton_poi_fused_mul_pow_rsub_sub_0 = async_compile.triton('triton_poi_fused_mul_pow_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_rsub_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_pow_rsub_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp0
tmp5 = 1.0
tmp6 = tmp5 - tmp0
tl.store(out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr1 + (x0), tmp4, xmask)
tl.store(out_ptr2 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub_1, squared_error, mul, bg_onehot], Original ATen: [aten.sub, aten.pow, aten.mul, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_pow_rsub_sub_0.run(arg1_1, arg0_1, buf0, buf1, buf2, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf1, buf2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
class SSLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, square=False):
"""
        Sensitivity-Specificity loss
paper: http://www.rogertam.ca/Brosch_MICCAI_2015.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/df0f86733357fdc92bbc191c8fec0dcf49aa5499/niftynet/layer/loss_segmentation.py#L392
"""
super(SSLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.r = 0.1
def forward(self, net_output, gt, loss_mask=None):
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == 'cuda':
                    y_onehot = y_onehot.to(net_output.device)
y_onehot.scatter_(1, gt, 1)
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
net_output = self.apply_nonlin(net_output)
bg_onehot = 1 - y_onehot
squared_error = (y_onehot - net_output) ** 2
specificity_part = sum_tensor(squared_error * y_onehot, axes) / (
sum_tensor(y_onehot, axes) + self.smooth)
sensitivity_part = sum_tensor(squared_error * bg_onehot, axes) / (
sum_tensor(bg_onehot, axes) + self.smooth)
ss = self.r * specificity_part + (1 - self.r) * sensitivity_part
if not self.do_bg:
if self.batch_dice:
ss = ss[1:]
else:
ss = ss[:, 1:]
ss = ss.mean()
return ss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
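if __name__ == '__main__':
    # Editor's sketch, not part of the original module: the target is assumed to
    # be one-hot and shaped like the prediction, so the scatter_ branch is skipped.
    _pred = torch.rand(2, 3, 8, 8)
    _gt = torch.zeros_like(_pred).scatter_(1, _pred.argmax(1, keepdim=True), 1.0)
    print(SSLoss()(_pred, _gt))  # scalar: 0.1 * specificity + 0.9 * sensitivity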
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_pow_rsub_sub_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
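    # in_ptr0 holds the target (y_onehot) and in_ptr1 the prediction; the kernel
    # fuses the elementwise terms: out_ptr0 = (gt - pred)**2 (squared_error),
    # out_ptr1 = squared_error * gt, out_ptr2 = 1 - gt (bg_onehot).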
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp0
tmp5 = 1.0
tmp6 = tmp5 - tmp0
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp4, xmask)
tl.store(out_ptr2 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_pow_rsub_sub_0[grid(256)](arg1_1, arg0_1, buf0,
buf1, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf1, buf2, buf0
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
class SSLossNew(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, square=False):
"""
        Sensitivity-Specificity loss
paper: http://www.rogertam.ca/Brosch_MICCAI_2015.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/df0f86733357fdc92bbc191c8fec0dcf49aa5499/niftynet/layer/loss_segmentation.py#L392
"""
super(SSLossNew, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.r = 0.1
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ayanglab/HDL | SSLoss | false | 6,290 | [
"Apache-2.0"
] | 1 | 5ff778d713331671ffa85e9fb63378d8c0a57769 | https://github.com/ayanglab/HDL/tree/5ff778d713331671ffa85e9fb63378d8c0a57769 | import torch
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
class Model(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True,
smooth=1.0, square=False):
"""
        Sensitivity-Specificity loss
paper: http://www.rogertam.ca/Brosch_MICCAI_2015.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/df0f86733357fdc92bbc191c8fec0dcf49aa5499/niftynet/layer/loss_segmentation.py#L392
"""
super().__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.r = 0.1
def forward(self, net_output, gt, loss_mask=None):
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == 'cuda':
                    y_onehot = y_onehot.to(net_output.device)
y_onehot.scatter_(1, gt, 1)
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
net_output = self.apply_nonlin(net_output)
bg_onehot = 1 - y_onehot
squared_error = (y_onehot - net_output) ** 2
specificity_part = sum_tensor(squared_error * y_onehot, axes) / (
sum_tensor(y_onehot, axes) + self.smooth)
sensitivity_part = sum_tensor(squared_error * bg_onehot, axes) / (
sum_tensor(bg_onehot, axes) + self.smooth)
ss = self.r * specificity_part + (1 - self.r) * sensitivity_part
if not self.do_bg:
if self.batch_dice:
ss = ss[1:]
else:
ss = ss[:, 1:]
ss = ss.mean()
return ss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vp/cvpfqtmyqyejplddg35qfvsydx2nitayg4a7ndcskm65y477g3hn.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_1, [2], True), kwargs = {})
# %sub_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %amax_default_1), kwargs = {})
# %div_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_1, 4), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dp/cdprejhdxjipmbhepumj5lwmh7q3n4bwayslcvlayzluxrhcsubr.py
# Topologically Sorted Source Nodes: [log_attn], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_attn => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_tensor_1, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, buf4, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_attn], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf4
return (buf3, buf2, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'temperature': 4}]
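if __name__ == '__main__':
    # Editor's sketch, not part of the original module; temperature = sqrt(d_k)
    # is an assumed choice. eval() disables the attention dropout so each row of
    # `attn` is a proper probability distribution.
    _sdpa = ScaledDotProductAttention(temperature=8 ** 0.5).eval()
    _q = _k = _v = torch.rand(2, 5, 8)
    _out, _attn, _log_attn = _sdpa(_q, _k, _v)
    assert torch.allclose(_attn.sum(-1), torch.ones(2, 5), atol=1e-06)
    assert torch.allclose(_attn, _log_attn.exp(), atol=1e-06)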
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
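    # Numerically stable softmax: subtract the row max, then scale by
    # 0.25 = 1 / temperature (temperature = 4 baked in at trace time) before exp.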
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, buf4, 64, XBLOCK=
64, num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_2[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf4
return buf3, buf2, buf5
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1], output[2]
| awesome-archive/FEAT | ScaledDotProductAttention | false | 6,291 | [
"MIT"
] | 1 | 940d525fcbf2a40528d284392a03e4b0193344a7 | https://github.com/awesome-archive/FEAT/tree/940d525fcbf2a40528d284392a03e4b0193344a7 | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
GraphConvolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3v/c3v7n6hzyrv5pn6uojl3hf6tko347a672spakigdzmqm7ebd4zwl.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.3):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x, adj):
support = self.linear(x)
output = adj.matmul(support)
output = self.dropout(F.relu(output))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
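if __name__ == '__main__':
    # Editor's sketch, not part of the original module: a row-stochastic
    # adjacency is one assumed choice; the layer only requires that
    # adj.matmul(support) is a valid batched product.
    _gcn = GraphConvolution(4, 4)
    _x = torch.rand(2, 4, 4)
    _adj = torch.softmax(torch.rand(2, 4, 4), dim=-1)
    print(_gcn(_x, _adj).shape)  # torch.Size([2, 4, 4])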
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
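    # Fused ReLU: tmp2 = max(0, x) is written back in place, while tmp4 records
    # the x <= 0 mask consumed later by the threshold_backward pass.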
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
del buf0
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf2, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf3, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0)
class GraphConvolutionNew(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.3):
super(GraphConvolutionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ayyyq/T-LSTM | GraphConvolution | false | 6,292 | [
"MIT"
] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.3):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x, adj):
support = self.linear(x)
output = adj.matmul(support)
output = self.dropout(F.relu(output))
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LayerNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tc/ctcoccnojrifwsjhb4gqgfu5kxpt6dvdpv4qwca7cbgn27ktptbk.py
# Topologically Sorted Source Nodes: [mean, sub, std, add, out, mul, add_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mean => mean
# mul => mul
# out => div
# std => sqrt, var
# sub => sub
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, sub, std, add, out, mul, add_1], Original ATen: [aten.mean, aten.sub, aten.std, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim
class LayerNorm(nn.Module):
"""A Layer Normalization layer.
Lei Ba, Jimmy, Jamie Ryan Kiros, and Geoffrey E. Hinton.
arXiv preprint arXiv:1607.06450 (2016).
"""
def __init__(self, dim):
super(LayerNorm, self).__init__()
self.bias = nn.Parameter(torch.zeros(1, dim))
self.gain = nn.Parameter(torch.ones(1, dim))
def forward(self, x):
        out = (x - x.mean(-1, keepdim=True)) / (x.std(-1, keepdim=True) + 1e-06)
return self.gain * out + self.bias
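# Note: torch.std defaults to the unbiased estimator (divide by N - 1), so this
# differs slightly from nn.LayerNorm, which normalizes by the biased variance.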
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp2 - tmp10
tmp13 = tmp12 * tmp12
tmp14 = tmp3 - tmp10
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp10
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp7 - tmp10
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp23 = 3.0
tmp24 = tmp22 / tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = 1e-06
tmp27 = tmp25 + tmp26
tmp28 = tmp11 / tmp27
tmp29 = tmp0 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
primals_1, primals_3, buf0, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class LayerNormNew(nn.Module):
"""A Layer Normalization layer.
    Lei Ba, Jimmy, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization."
    arXiv preprint arXiv:1607.06450 (2016).
"""
def __init__(self, dim):
super(LayerNormNew, self).__init__()
self.bias = nn.Parameter(torch.zeros(1, dim))
self.gain = nn.Parameter(torch.ones(1, dim))
def forward(self, input_0):
primals_2 = self.bias
primals_3 = self.gain
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
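# Minimal usage sketch (assumes a CUDA device; the stride asserts in `call`
# pin the input to shape (4, 4, 4, 4)):
#
#   ln = LayerNormNew(dim=4).cuda()
#   y = ln(torch.rand(4, 4, 4, 4, device='cuda'))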
| awesome-archive/nmtpytorch | LayerNorm | false | 6,293 | [
"MIT"
] | 1 | 7c0ea21b29fc85a1f30ef4400d62b9d8e3d88be4 | https://github.com/awesome-archive/nmtpytorch/tree/7c0ea21b29fc85a1f30ef4400d62b9d8e3d88be4 | import torch
import torch.nn as nn
import torch.optim
class Model(nn.Module):
"""A Layer Normalization layer.
    Lei Ba, Jimmy, Jamie Ryan Kiros, and Geoffrey E. Hinton. "Layer Normalization."
    arXiv preprint arXiv:1607.06450 (2016).
"""
def __init__(self, dim):
super().__init__()
self.bias = nn.Parameter(torch.zeros(1, dim))
self.gain = nn.Parameter(torch.ones(1, dim))
def forward(self, x):
        out = (x - x.mean(-1, keepdim=True)) / (x.std(-1, keepdim=True) + 1e-06)
return self.gain * out + self.bias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
AttentionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
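    # tmp4 = max(0, conv_out + bias): the bias add and ReLU fused in place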
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_2 => sigmoid
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
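    # fused bias add + sigmoid for the single-channel attention logits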
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_1.run(buf5, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionModule(nn.Module):
""" A neural module that takes a feature map, attends to the features, and
    produces an attention map.
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(dim, 1, kernel_size=1, padding=0)
torch.nn.init.kaiming_normal_(self.conv1.weight)
torch.nn.init.kaiming_normal_(self.conv2.weight)
torch.nn.init.kaiming_normal_(self.conv3.weight)
self.dim = dim
def forward(self, feats):
out = F.relu(self.conv1(feats))
out = F.relu(self.conv2(out))
out = torch.sigmoid(self.conv3(out))
return out
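# The result is a (N, 1, H, W) attention map in (0, 1); downstream modules
# typically broadcast it over the feature channels and multiply elementwise.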
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
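    # in-place fused bias add + ReLU over the convolution output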
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(256)](buf3, primals_5, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf5, primals_7,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_7
return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf5
class AttentionModuleNew(nn.Module):
""" A neural module that takes a feature map, attends to the features, and
    produces an attention map.
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(dim, 1, kernel_size=1, padding=0)
torch.nn.init.kaiming_normal_(self.conv1.weight)
torch.nn.init.kaiming_normal_(self.conv2.weight)
torch.nn.init.kaiming_normal_(self.conv3.weight)
self.dim = dim
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| aymenx17/ShapeCount | AttentionModule | false | 6,294 | [
"Apache-2.0"
] | 1 | 6d2fb780684335ccd0127b3084bf40674203bcf1 | https://github.com/aymenx17/ShapeCount/tree/6d2fb780684335ccd0127b3084bf40674203bcf1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
""" A neural module that takes a feature map, attends to the features, and
    produces an attention map.
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(dim, 1, kernel_size=1, padding=0)
torch.nn.init.kaiming_normal_(self.conv1.weight)
torch.nn.init.kaiming_normal_(self.conv2.weight)
torch.nn.init.kaiming_normal_(self.conv3.weight)
self.dim = dim
def forward(self, feats):
out = F.relu(self.conv1(feats))
out = F.relu(self.conv2(out))
out = torch.sigmoid(self.conv3(out))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
GDiceLossV2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/io/ciova4kygjmtphlfmjc5zopchbwqwrxkqt77t37wnivl3mslnv6q.py
# Topologically Sorted Source Nodes: [transposed_1, target_sum, mul, clamp, truediv], Original ATen: [aten.clone, aten.sum, aten.mul, aten.clamp, aten.reciprocal]
# Source node to ATen node mapping:
# clamp => clamp_min
# mul => mul
# target_sum => sum_1
# transposed_1 => clone_1
# truediv => mul_1, reciprocal
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_1,), kwargs = {memory_format: torch.contiguous_format})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [-1]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, %sum_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 1e-05), kwargs = {})
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%clamp_min,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
triton_per_fused_clamp_clone_mul_reciprocal_sum_0 = async_compile.triton('triton_per_fused_clamp_clone_mul_reciprocal_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_clone_mul_reciprocal_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_clone_mul_reciprocal_sum_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = (rindex // 16)
x0 = xindex
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0) + (64*r2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp4 * tmp4
tmp6 = 1e-05
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.full([1, 1], 1, tl.int32)
tmp9 = tmp8 / tmp7
tmp10 = 1.0
tmp11 = tmp9 * tmp10
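    # tmp11 = 1 / max(target_sum ** 2, 1e-05): the generalized-Dice class
    # weight, computed in the same pass that writes the permuted target copy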
tl.store(out_ptr0 + (r3 + (64*x0)), tmp0, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/j4/cj43tvt2ej5tb4b45yykcp3vlqrhx3dhcrcrr3xxldx5ndw4whmn.py
# Topologically Sorted Source Nodes: [transposed], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# transposed => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16) % 4
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2) + (64*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [transposed_1, target_sum, mul, clamp, truediv], Original ATen: [aten.clone, aten.sum, aten.mul, aten.clamp, aten.reciprocal]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_clone_mul_reciprocal_sum_0.run(buf2, arg1_1, buf0, 4, 64, grid=grid(4), stream=stream0)
del arg1_1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [transposed], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(arg0_1, buf3, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf2, reinterpret_tensor(buf3, (4, 64), (64, 1), 0), reinterpret_tensor(buf0, (4, 64), (64, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order).contiguous()
return transposed.view(C, -1)
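# e.g. a (4, 4, 4, 4) tensor flattens to (4, 64): one row per channel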
class GDiceLossV2(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-05):
"""
Generalized Dice;
Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super(GDiceLossV2, self).__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, net_output, gt):
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
                if net_output.device.type == 'cuda':
                    # scatter_ below needs y_onehot on the same device as gt
                    y_onehot = y_onehot.to(net_output.device)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
net_output = self.apply_nonlin(net_output)
input = flatten(net_output)
target = flatten(y_onehot)
target = target.float()
target_sum = target.sum(-1)
        class_weights = Variable(1.0 / (target_sum * target_sum).clamp(min=self.smooth), requires_grad=False)
intersect = (input * target).sum(-1) * class_weights
intersect = intersect.sum()
denominator = ((input + target).sum(-1) * class_weights).sum()
return -2.0 * intersect / denominator.clamp(min=self.smooth)
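# The return value is the negative generalized Dice coefficient, so minimizing
# this loss maximizes the weighted overlap between prediction and target.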
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clamp_clone_mul_reciprocal_sum_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 16
r2 = rindex // 16
x0 = xindex
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0 + 64 * r2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = tmp4 * tmp4
tmp6 = 1e-05
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tl.full([1, 1], 1, tl.int32)
tmp9 = tmp8 / tmp7
tmp10 = 1.0
tmp11 = tmp9 * tmp10
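    # per-class generalized-Dice weight: 1 / clamp(target_sum ** 2, 1e-05)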
tl.store(out_ptr0 + (r3 + 64 * x0), tmp0, xmask)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16 % 4
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2 + 64 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_clamp_clone_mul_reciprocal_sum_0[grid(4)](buf2,
arg1_1, buf0, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg1_1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](arg0_1, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
    return buf2, reinterpret_tensor(buf3, (4, 64), (64, 1), 0), reinterpret_tensor(buf0, (4, 64), (64, 1), 0)
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order).contiguous()
return transposed.view(C, -1)
class GDiceLossV2New(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-05):
"""
Generalized Dice;
Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super(GDiceLossV2New, self).__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ayanglab/HDL | GDiceLossV2 | false | 6,295 | [
"Apache-2.0"
] | 1 | 5ff778d713331671ffa85e9fb63378d8c0a57769 | https://github.com/ayanglab/HDL/tree/5ff778d713331671ffa85e9fb63378d8c0a57769 | import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
transposed = tensor.permute(axis_order).contiguous()
return transposed.view(C, -1)
class Model(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-05):
"""
Generalized Dice;
Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super().__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, net_output, gt):
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([(i == j) for i, j in zip(net_output.shape, gt.shape)]):
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
                if net_output.device.type == 'cuda':
                    # scatter_ below needs y_onehot on the same device as gt
                    y_onehot = y_onehot.to(net_output.device)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
net_output = self.apply_nonlin(net_output)
input = flatten(net_output)
target = flatten(y_onehot)
target = target.float()
target_sum = target.sum(-1)
        class_weights = Variable(1.0 / (target_sum * target_sum).clamp(min=self.smooth), requires_grad=False)
intersect = (input * target).sum(-1) * class_weights
intersect = intersect.sum()
denominator = ((input + target).sum(-1) * class_weights).sum()
return -2.0 * intersect / denominator.clamp(min=self.smooth)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/f6/cf6msygnufcklcpvjj2djhdclyyqh6bvy7v6bxcvwogb4rkzv4nj.py
# Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy]
# Source node to ATen node mapping:
# binary_cross_entropy => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %maximum_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
triton_per_fused_binary_cross_entropy_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
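    # both log terms are clamped at -100, mirroring PyTorch's BCELoss guard
    # against log(0) = -inf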
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy], Original ATen: [aten.binary_cross_entropy]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BCELoss(nn.BCELoss):
def __init__(self, **kwargs):
super(BCELoss, self).__init__(**kwargs)
def forward(self, input, target):
input = input.squeeze(1)
target = target.float()
return super(BCELoss, self).forward(input, target)
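# squeeze(1) drops a singleton channel dimension so (N, 1, H, W) predictions
# match (N, H, W) targets; with the (4, 4, 4, 4) inputs below it is a no-op.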
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = -tmp3
tmp5 = libdevice.log1p(tmp4)
tmp6 = -100.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp2 * tmp7
tmp9 = tl_math.log(tmp3)
tmp10 = triton_helpers.maximum(tmp9, tmp6)
tmp11 = tmp0 * tmp10
tmp12 = tmp8 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class BCELossNew(nn.BCELoss):
def __init__(self, **kwargs):
super(BCELossNew, self).__init__(**kwargs)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| azxj/BRRNet | BCELoss | false | 6,296 | [
"MIT"
] | 1 | 274068efd5453f2c1fb07bfaad448d048b9c793b | https://github.com/azxj/BRRNet/tree/274068efd5453f2c1fb07bfaad448d048b9c793b | import torch
import torch.nn as nn
class Model(nn.BCELoss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, input, target):
input = input.squeeze(1)
target = target.float()
        return super().forward(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
LogLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/p5/cp5ubgxix66mcwdxvtf7bvbxdvoimtka63njezhp3tfusdmuegld.py
# Topologically Sorted Source Nodes: [inp, tgt, mse_loss, loss], Original ATen: [aten.atan, aten.mse_loss, aten.sqrt]
# Source node to ATen node mapping:
# inp => atan_1
# loss => sqrt
# mse_loss => mean, pow_1, sub
# tgt => atan
# Graph fragment:
# %atan_1 : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%arg1_1,), kwargs = {})
# %atan : [num_users=1] = call_function[target=torch.ops.aten.atan.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%atan_1, %atan), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
triton_per_fused_atan_mse_loss_sqrt_0 = async_compile.triton('triton_per_fused_atan_mse_loss_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_atan_mse_loss_sqrt_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_atan_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = libdevice.atan(tmp0)
tmp3 = libdevice.atan(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tmp11 = libdevice.sqrt(tmp10)
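    # the reduction is the RMSE between atan(input) and atan(target); atan
    # bounds both operands to (-pi/2, pi/2), damping the effect of outliers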
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [inp, tgt, mse_loss, loss], Original ATen: [aten.atan, aten.mse_loss, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_atan_mse_loss_sqrt_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch.nn import MSELoss
class LogLoss(MSELoss):
def __init__(self):
super(LogLoss, self).__init__()
self.loss = torch.nn.MSELoss()
self.loss2 = torch.nn.MSELoss()
def forward(self, input, target):
tgt = torch.atan(target)
inp = torch.atan(input)
loss = torch.sqrt(self.loss(inp, tgt))
return loss
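# i.e. loss = sqrt(MSE(atan(input), atan(target))); self.loss2 is unused.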
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import MSELoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_atan_mse_loss_sqrt_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = libdevice.atan(tmp0)
tmp3 = libdevice.atan(tmp2)
tmp4 = tmp1 - tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = 256.0
tmp10 = tmp8 / tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_atan_mse_loss_sqrt_0[grid(1)](buf1, arg1_1, arg0_1,
1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LogLossNew(MSELoss):
def __init__(self):
super(LogLossNew, self).__init__()
self.loss = torch.nn.MSELoss()
self.loss2 = torch.nn.MSELoss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| aykuttasil/mindsdb | LogLoss | false | 6,297 | [
"MIT"
] | 1 | 2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad | https://github.com/aykuttasil/mindsdb/tree/2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad | import torch
from torch.nn import MSELoss
class Model(MSELoss):
def __init__(self):
super().__init__()
self.loss = torch.nn.MSELoss()
self.loss2 = torch.nn.MSELoss()
def forward(self, input, target):
tgt = torch.atan(target)
inp = torch.atan(input)
loss = torch.sqrt(self.loss(inp, tgt))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
QueryModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/f6/cf65bigf2x75qqtqyjjyxjzpdp7kb4qomxjgfae36bygybmqynpn.py
# Topologically Sorted Source Nodes: [repeat, attended_feats], Original ATen: [aten.repeat, aten.mul]
# Source node to ATen node mapping:
# attended_feats => mul
# repeat => repeat
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_1, [1, 4, 1, 1]), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %repeat), kwargs = {})
triton_poi_fused_mul_repeat_0 = async_compile.triton('triton_poi_fused_mul_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 16384)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2k/c2kt5ypnpdsrdo7ikfu23cbqutpdsaski7jflvenjwodgmh72m6c.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ls/cls275xyrm3gz4f57t3oamopa2znb5qzxumhxr3zuvtwm5k43gkr.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_5, %primals_6, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 3600) % 4
x0 = xindex % 3600
x3 = (xindex // 3600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (3712*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [repeat, attended_feats], Original ATen: [aten.repeat, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_repeat_0.run(primals_2, primals_1, buf0, 65536, grid=grid(65536), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_4, 61504, grid=grid(61504), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 60, 60), (14400, 3600, 60, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 60, 60), (14848, 3712, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_2.run(buf4, primals_6, buf5, 57600, grid=grid(57600), stream=stream0)
del primals_6
return (buf4, primals_3, primals_5, buf0, buf2, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class QueryModule(nn.Module):
""" A neural module that takes as input a feature map and an attention and produces a feature
map as output.
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3)
torch.nn.init.kaiming_normal_(self.conv1.weight)
torch.nn.init.kaiming_normal_(self.conv2.weight)
self.dim = dim
def forward(self, feats, attn):
attended_feats = torch.mul(feats, attn.repeat(1, self.dim, 1, 1))
out = F.relu(self.conv1(attended_feats))
out = F.relu(self.conv2(out))
return out
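# Equivalence sketch (illustrative, assuming attn has a singleton channel
# dim as in get_inputs below); `demo_attention_gating` is a hypothetical name.
def demo_attention_gating(dim=4):
    feats = torch.rand(2, dim, 8, 8)
    attn = torch.rand(2, 1, 8, 8)
    repeated = torch.mul(feats, attn.repeat(1, dim, 1, 1))
    broadcast = feats * attn  # PyTorch broadcasting over the channel dim
    assert torch.equal(repeated, broadcast)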
def get_inputs():
return [torch.rand([4, 4, 64, 64]), torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x3, tmp2, None)
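# Index-arithmetic check (illustrative, not part of the dump): for the
# (4, 4, 64, 64) output, x0 = xindex % 4096 is the offset within one 64x64
# plane and x2 = xindex // 16384 is the batch index, so in_ptr1[x0 + 4096*x2]
# rereads the single-channel attention map for all 4 channels -- this is the
# fused `repeat`. `check_repeat_indexing` is a hypothetical helper.
def check_repeat_indexing():
    for n in range(4):
        for c in range(4):
            for hw in range(0, 4096, 509):
                xindex = 16384 * n + 4096 * c + hw
                assert xindex % 4096 == hw and xindex // 16384 == n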
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_2(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3600 % 4
x0 = xindex % 3600
x3 = xindex // 3600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3712 * x3), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_repeat_0[grid(65536)](primals_2, primals_1,
buf0, 65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(61504)](buf2, primals_4,
61504, XBLOCK=512, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 60, 60), (14400, 3600, 60, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 60, 60), (14848, 3712, 60, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_2[grid(57600)](
buf4, primals_6, buf5, 57600, XBLOCK=512, num_warps=4, num_stages=1
)
del primals_6
return buf4, primals_3, primals_5, buf0, buf2, buf5
class QueryModuleNew(nn.Module):
""" A neural module that takes as input a feature map and an attention and produces a feature
map as output.
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3)
torch.nn.init.kaiming_normal_(self.conv1.weight)
torch.nn.init.kaiming_normal_(self.conv2.weight)
self.dim = dim
def forward(self, input_0, input_1):
primals_3 = self.conv1.weight
primals_4 = self.conv1.bias
primals_5 = self.conv2.weight
primals_6 = self.conv2.bias
primals_2 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
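# Mapping note (illustrative): input_0 is the (4, 4, 64, 64) feature map
# (primals_2) and input_1 the (4, 1, 64, 64) attention map (primals_1),
# matching the assert_size_stride checks in call(); the conv weights and
# biases occupy primals_3..primals_6.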
| aymenx17/ShapeCount | QueryModule | false | 6,298 | [
"Apache-2.0"
] | 1 | 6d2fb780684335ccd0127b3084bf40674203bcf1 | https://github.com/aymenx17/ShapeCount/tree/6d2fb780684335ccd0127b3084bf40674203bcf1 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
""" A neural module that takes as input a feature map and an attention and produces a feature
map as output.
"""
def __init__(self, dim):
super().__init__()
self.conv1 = nn.Conv2d(dim, dim, kernel_size=3)
self.conv2 = nn.Conv2d(dim, dim, kernel_size=3)
torch.nn.init.kaiming_normal_(self.conv1.weight)
torch.nn.init.kaiming_normal_(self.conv2.weight)
self.dim = dim
def forward(self, feats, attn):
attended_feats = torch.mul(feats, attn.repeat(1, self.dim, 1, 1))
out = F.relu(self.conv1(attended_feats))
out = F.relu(self.conv2(out))
return out
def get_inputs():
return [torch.rand([4, 4, 64, 64]), torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [4]
|
GCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yp/cypwoula5voccv2ue3hp5xtbp77jvm4e2glil4m2p2a4o5qhkkx7.py
# Topologically Sorted Source Nodes: [relu, x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# x => relu_1
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_4,), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%relu,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = 0.0
tmp5 = tmp3 <= tmp4
tmp6 = tmp2 <= tmp4
tl.store(out_ptr0 + (x0), tmp3, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
tl.store(out_ptr2 + (x0), tmp6, xmask)
''', device_str='cuda')
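# Note (illustrative): the doubled triton_helpers.maximum above mirrors the
# two stacked ReLUs in the model (one inside GraphConvolution.forward, one
# in GCN.forward); since relu(relu(x)) == relu(x), tmp3 equals tmp2, but
# both backward masks (le and le_1) are still materialized for autograd.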
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu, x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf2, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, buf4, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.3):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x, adj):
support = self.linear(x)
output = adj.matmul(support)
output = self.dropout(F.relu(output))
return output
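# Aggregation sketch (illustrative, not from the source): adj.matmul(support)
# computes output[i] = sum_j adj[i, j] * support[j], i.e. an adjacency-
# weighted sum of the neighbors' linearly transformed features.
# `demo_gcn_aggregation` is a hypothetical helper name.
def demo_gcn_aggregation(n=5, d=3):
    adj, support = torch.rand(n, n), torch.rand(n, d)
    out = adj.matmul(support)
    manual = torch.stack([(adj[i].unsqueeze(1) * support).sum(0) for i in range(n)])
    assert torch.allclose(out, manual)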
class GCN(nn.Module):
def __init__(self, in_size, out_size, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(in_size, out_size)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4, 'out_size': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = 0.0
tmp5 = tmp3 <= tmp4
tmp6 = tmp2 <= tmp4
tl.store(out_ptr0 + x0, tmp3, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
tl.store(out_ptr2 + x0, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_4, (16, 4, 4), (16, 4,
1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0),
out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf2,
buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf3, buf4, reinterpret_tensor(primals_4, (16, 4, 4), (16, 1, 4), 0)
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.3):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x, adj):
support = self.linear(x)
output = adj.matmul(support)
output = self.dropout(F.relu(output))
return output
class GCNNew(nn.Module):
def __init__(self, in_size, out_size, dropout):
super(GCNNew, self).__init__()
self.gc1 = GraphConvolution(in_size, out_size)
self.dropout = dropout
def forward(self, input_0, input_1):
primals_1 = self.gc1.linear.weight
primals_2 = self.gc1.linear.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ayyyq/T-LSTM | GCN | false | 6,299 | [
"MIT"
] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphConvolution(nn.Module):
"""
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, dropout=0.3):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.linear = nn.Linear(in_features, out_features)
self.dropout = nn.Dropout(dropout)
def forward(self, x, adj):
support = self.linear(x)
output = adj.matmul(support)
output = self.dropout(F.relu(output))
return output
class Model(nn.Module):
def __init__(self, in_size, out_size, dropout):
super().__init__()
self.gc1 = GraphConvolution(in_size, out_size)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k5/ck5ilor2trkiz44dx6xfb5amrafal5sleonmuiovxkn7recadkcg.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 7852416
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 327184) % 6
x0 = xindex % 327184
x4 = (xindex // 327184)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (327200*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/w7/cw7sjjibc3lbuwjvyhjrp4p4u4z5mz777qpqhbzavliikaix2a6f.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1963104
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 286
x1 = (xindex // 286) % 286
x2 = (xindex // 81796)
x3 = xindex % 81796
tmp0 = tl.load(in_ptr0 + ((2*x0) + (1144*x1) + (327200*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (1144*x1) + (327200*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (572 + (2*x0) + (1144*x1) + (327200*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (573 + (2*x0) + (1144*x1) + (327200*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3 + (81824*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x3 + (81920*x2)), tmp16, xmask)
''', device_str='cuda')
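# Window note (illustrative): the four loads above at offsets 0, 1, 572 and
# 573 (row stride 572 in the padded buf1) form one 2x2 max-pool window, with
# 2*x0 and 1144*x1 selecting even input columns and rows. A plain PyTorch
# check of the same offset pattern (`check_pool_window` is a hypothetical
# helper):
def check_pool_window(W=4):
    x = torch.arange(W * W, dtype=torch.float32).view(1, 1, W, W)
    pooled = torch.nn.functional.max_pool2d(x, 2, 2)
    flat = x.flatten().tolist()
    manual = [[max(flat[2 * j + 2 * W * i + o] for o in (0, 1, W, W + 1))
               for j in range(W // 2)] for i in range(W // 2)]
    assert pooled.view(W // 2, W // 2).tolist() == manual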
# kernel path: runs/run_shard_4/inductor_cache/ht/chteer36r7sjkhy3oswu2ki3r3gzeww4iixtibxbybuqrjxy2esg.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 5089536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 79524) % 16
x0 = xindex % 79524
x4 = (xindex // 79524)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (79552*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5q/c5q36puxlqndqrmcwl6kd5ldbjbccujsl2lv2qtf63mkx5jlfxi4.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1272384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 141
x1 = (xindex // 141) % 141
x2 = (xindex // 19881)
x3 = xindex % 19881
tmp0 = tl.load(in_ptr0 + ((2*x0) + (564*x1) + (79552*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (564*x1) + (79552*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (282 + (2*x0) + (564*x1) + (79552*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (283 + (2*x0) + (564*x1) + (79552*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + (19968*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x3 + (19904*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jx/cjxcz2fenzy56ehxz7auaneoinwqc6rlyasza5dllvckkgobi7ub.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.view]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1
# x_2 => view
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%getitem_2, [-1, 141376]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_view_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_view_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_view_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1272384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 141376
x1 = (xindex // 141376)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((19904*((x0 + (141376*x1)) // 19881)) + ((x0 + (141376*x1)) % 19881)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4p/c4pqt5y36x352qxu2w3svdkfwdex3nqgzyn4p4xrylhy7rcwa4kv.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/km/ckmzedqoppvjumcpvuwr7rlbwsu5t5uf452zjvb7mms5glmirjlz.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 756
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 576, 576), (995328, 331776, 576, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 141376), (141376, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84, ), (1, ))
assert_size_stride(primals_10, (4, 84), (84, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 572, 572), (1963104, 327184, 572, 1))
buf1 = empty_strided_cuda((4, 6, 572, 572), (1963200, 327200, 572, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 7852416, grid=grid(7852416), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 286, 286), (490944, 81824, 286, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 286, 286), (491520, 81920, 286, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 1963104, grid=grid(1963104), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 282, 282), (1272384, 79524, 282, 1))
buf5 = empty_strided_cuda((4, 16, 282, 282), (1272832, 79552, 282, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 5089536, grid=grid(5089536), stream=stream0)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 16, 141, 141), (319488, 19968, 141, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 141, 141), (318464, 19904, 141, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1272384, grid=grid(1272384), stream=stream0)
buf8 = empty_strided_cuda((9, 141376), (141376, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.view]
triton_poi_fused_max_pool2d_with_indices_view_4.run(buf7, buf8, 1272384, grid=grid(1272384), stream=stream0)
del buf7
buf9 = empty_strided_cuda((9, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (141376, 120), (1, 141376), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf10, primals_7, 1080, grid=grid(1080), stream=stream0)
del primals_7
buf11 = empty_strided_cuda((9, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf12, primals_9, 756, grid=grid(756), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((9, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(primals_10, (84, 4), (1, 84), 0), alpha=1, beta=1, out=buf13)
del primals_11
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, buf8, buf10, buf12, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 576, 576), (995328, 331776, 576, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 141376), (141376, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 94 * 94, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 4)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 94 * 94)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
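# Shape note (observation, not a fix): for the 576x576 inputs in get_inputs,
# the conv/pool stack yields a (N, 16, 141, 141) map (16*141*141 = 318096
# elements per sample), while fc1 expects 16*94*94 = 141376 features, so
# view(-1, 16 * 94 * 94) regroups the 4-sample batch into 9 rows -- matching
# the (9, 141376) buffer asserted in the compiled call() above.
# `check_net_flatten` is a hypothetical helper name.
def check_net_flatten():
    h = torch.rand(4, 3, 576, 576)
    pool = nn.MaxPool2d(2, 2)
    h = pool(F.relu(nn.Conv2d(3, 6, 5)(h)))
    h = pool(F.relu(nn.Conv2d(6, 16, 5)(h)))
    assert h.shape == (4, 16, 141, 141)
    assert h.reshape(-1, 16 * 94 * 94).shape[0] == 9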
def get_inputs():
return [torch.rand([4, 3, 576, 576])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 7852416
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 327184 % 6
x0 = xindex % 327184
x4 = xindex // 327184
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 327200 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1963104
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 286
x1 = xindex // 286 % 286
x2 = xindex // 81796
x3 = xindex % 81796
tmp0 = tl.load(in_ptr0 + (2 * x0 + 1144 * x1 + 327200 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 1144 * x1 + 327200 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (572 + 2 * x0 + 1144 * x1 + 327200 * x2),
xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (573 + 2 * x0 + 1144 * x1 + 327200 * x2),
xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3 + 81824 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x3 + 81920 * x2), tmp16, xmask)
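# Editor's note: a minimal eager sketch of the 2x2/stride-2 pooling this
# kernel fuses, assuming nn.MaxPool2d(2, 2) semantics. The kernel stores the
# max values plus an int8 window-local argmax (0..3) for the backward pass;
# torch's return_indices would instead give flat per-channel positions, so
# the index encodings differ even though the forward values match.
# `_reference_pool` is a hypothetical helper, not part of the generated code.
def _reference_pool(t):
    # forward values only; the compiled kernel also emits window offsets
    return torch.nn.functional.max_pool2d(t, kernel_size=2, stride=2)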
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 5089536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 79524 % 16
x0 = xindex % 79524
x4 = xindex // 79524
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 79552 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1272384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 141
x1 = xindex // 141 % 141
x2 = xindex // 19881
x3 = xindex % 19881
tmp0 = tl.load(in_ptr0 + (2 * x0 + 564 * x1 + 79552 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 564 * x1 + 79552 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (282 + 2 * x0 + 564 * x1 + 79552 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (283 + 2 * x0 + 564 * x1 + 79552 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3 + 19968 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x3 + 19904 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_view_4(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1272384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 141376
x1 = xindex // 141376
x2 = xindex
tmp0 = tl.load(in_ptr0 + (19904 * ((x0 + 141376 * x1) // 19881) + (x0 +
141376 * x1) % 19881), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
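# Editor's note: this kernel is a strided-gather copy that materializes the
# padded pooling buffer as a contiguous matrix for the fc1 matmul, i.e. the
# compiled form of `x.view(-1, 16 * 94 * 94)`. A hedged equivalent
# (hypothetical helper name):
def _reference_flatten(pooled):
    # pooled: (4, 16, 141, 141) contiguous; 1272384 elements == 9 * 141376,
    # so the result has 9 rows rather than the batch size of 4
    return pooled.contiguous().view(-1, 141376)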
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1080
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 756
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
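# Editor's note: triton_poi_fused_relu_5/_6 only fuse the bias add with
# ReLU in place; the matmuls run through extern_kernels.mm below. A hedged
# eager equivalent of one fused linear+ReLU step (hypothetical helper name):
def _reference_linear_relu(x, weight, bias):
    # weight is stored (out_features, in_features), hence the transpose
    return torch.relu(x @ weight.t() + bias)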
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 576, 576), (995328, 331776, 576, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 141376), (141376, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (4, 84), (84, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 572, 572), (1963104, 327184, 572, 1))
buf1 = empty_strided_cuda((4, 6, 572, 572), (1963200, 327200, 572,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(7852416)](buf0, primals_2,
buf1, 7852416, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 6, 286, 286), (490944, 81824, 286, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 6, 286, 286), (491520, 81920, 286, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(1963104)](buf1,
buf2, buf3, 1963104, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 282, 282), (1272384, 79524, 282, 1))
buf5 = empty_strided_cuda((4, 16, 282, 282), (1272832, 79552, 282,
1), torch.float32)
triton_poi_fused_convolution_relu_2[grid(5089536)](buf4, primals_5,
buf5, 5089536, XBLOCK=512, num_warps=8, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 16, 141, 141), (319488, 19968, 141, 1
), torch.int8)
buf7 = empty_strided_cuda((4, 16, 141, 141), (318464, 19904, 141, 1
), torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(1272384)](buf5,
buf6, buf7, 1272384, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = empty_strided_cuda((9, 141376), (141376, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_view_4[grid(1272384)](buf7,
buf8, 1272384, XBLOCK=1024, num_warps=4, num_stages=1)
del buf7
buf9 = empty_strided_cuda((9, 120), (120, 1), torch.float32)
extern_kernels.mm(buf8, reinterpret_tensor(primals_6, (141376, 120),
(1, 141376), 0), out=buf9)
buf10 = buf9
del buf9
triton_poi_fused_relu_5[grid(1080)](buf10, primals_7, 1080, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((9, 84), (84, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (120, 84), (
1, 120), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_6[grid(756)](buf12, primals_9, 756, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf13 = empty_strided_cuda((9, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf12, reinterpret_tensor(
primals_10, (84, 4), (1, 84), 0), alpha=1, beta=1, out=buf13)
del primals_11
return (buf13, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, buf8, buf10, buf12, primals_10, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 94 * 94, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 4)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| arefmalek/Demographics_Disenfranchisement | Net | false | 6,300 | [
"MIT"
] | 1 | f4ae8c0965cf1b1cab9b245c3f5f54d3b5fe9aba | https://github.com/arefmalek/Demographics_Disenfranchisement/tree/f4ae8c0965cf1b1cab9b245c3f5f54d3b5fe9aba | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 94 * 94, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 4)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 94 * 94)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 576, 576])]
def get_init_inputs():
return []
|
ExgLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2q/c2q22dx3h7edumh6lpz6bxadowar3krb43bmjp6rgao6qugg2rep.py
# Topologically Sorted Source Nodes: [add, add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %view_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x2), xmask)
tmp8 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tl.store(in_out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, add_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf3, primals_2, buf1, primals_5, buf2, primals_8, 256, grid=grid(256), stream=stream0)
del buf1
del buf2
del primals_2
del primals_5
del primals_8
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ExgLayer(nn.Module):
def __init__(self, x_size, h_size, g_size, out_size):
super(ExgLayer, self).__init__()
self.h_size = h_size
self.g_size = g_size
self.out_size = out_size
self.x_size = x_size
self.linear_x2 = nn.Linear(x_size, out_size)
self.linear_h2 = nn.Linear(h_size, out_size)
self.linear_g2 = nn.Linear(g_size, out_size)
def forward(self, x, h, g):
return self.linear_x2(x) + self.linear_h2(h) + self.linear_g2(g)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'x_size': 4, 'h_size': 4, 'g_size': 4, 'out_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x2, xmask)
tmp8 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tl.store(in_out_ptr0 + x2, tmp10, xmask)
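# Editor's note: a minimal sketch of the whole compiled forward, assuming the
# three matmuls are dispatched to extern_kernels.mm and only the bias adds
# and the two elementwise sums are fused in the kernel above (hypothetical
# helper name, not part of the generated module):
def _reference_exg(x, h, g, wx, bx, wh, bh, wg, bg):
    # equivalent to linear_x2(x) + linear_h2(h) + linear_g2(g)
    return (x @ wx.t() + bx) + (h @ wh.t() + bh) + (g @ wg.t() + bg)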
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf3, primals_2, buf1, primals_5,
buf2, primals_8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del buf2
del primals_2
del primals_5
del primals_8
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0)
class ExgLayerNew(nn.Module):
def __init__(self, x_size, h_size, g_size, out_size):
super(ExgLayerNew, self).__init__()
self.h_size = h_size
self.g_size = g_size
self.out_size = out_size
self.x_size = x_size
self.linear_x2 = nn.Linear(x_size, out_size)
self.linear_h2 = nn.Linear(h_size, out_size)
self.linear_g2 = nn.Linear(g_size, out_size)
def forward(self, input_0, input_1, input_2):
primals_1 = self.linear_x2.weight
primals_2 = self.linear_x2.bias
primals_4 = self.linear_h2.weight
primals_5 = self.linear_h2.bias
primals_7 = self.linear_g2.weight
primals_8 = self.linear_g2.bias
primals_3 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| ayyyq/T-LSTM | ExgLayer | false | 6,301 | [
"MIT"
] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, x_size, h_size, g_size, out_size):
super().__init__()
self.h_size = h_size
self.g_size = g_size
self.out_size = out_size
self.x_size = x_size
self.linear_x2 = nn.Linear(x_size, out_size)
self.linear_h2 = nn.Linear(h_size, out_size)
self.linear_g2 = nn.Linear(g_size, out_size)
def forward(self, x, h, g):
return self.linear_x2(x) + self.linear_h2(h) + self.linear_g2(g)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
myCustomModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lj/clj3mscf4tqz7wlbw6be5v4cyjekfwhnnfmmz6ngsoi4fvpbcy27.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3600) % 10
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xr/cxr65jdasvpgnteicbgqvfut76mmpllnjbf3v2jm2v2roq4iu75d.py
# Topologically Sorted Source Nodes: [max_pool2d, x, log_softmax], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax
# max_pool2d => _low_memory_max_pool2d_with_offsets
# x => relu
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu, [1], True), kwargs = {})
triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1 = async_compile.triton('triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 3600
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r3 = rindex
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x4 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3600*r3) + (36000*x2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3600*r3) + (36000*x2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3600*r3) + (36000*x2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3600*r3) + (36000*x2)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, float("-inf"))
tmp12 = triton_helpers.max2(tmp11, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fb/cfbeya5mpnm5mpeblt7ld5jt6zk7bs4vdxj7yqyw32uw7faqhzrc.py
# Topologically Sorted Source Nodes: [max_pool2d, x, log_softmax], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten._log_softmax, aten.threshold_backward]
# Source node to ATen node mapping:
# log_softmax => sub
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x => relu
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %amax), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x5 = (xindex // 30)
x2 = (xindex // 9000)
x6 = xindex % 9000
x7 = xindex % 900
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x5)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x5)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x5)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x5)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x7 + (900*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = tmp18 - tmp19
tmp21 = 0.0
tmp22 = tmp18 <= tmp21
tl.store(out_ptr0 + (x6 + (9088*x2)), tmp15, xmask)
tl.store(out_ptr1 + (x6 + (9024*x2)), tmp20, xmask)
tl.store(out_ptr2 + (x6 + (9088*x2)), tmp22, xmask)
''', device_str='cuda')
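# Editor's note: besides the row-max-shifted ReLU output (x - amax), the
# kernel above also writes the int8 window argmax for the pooling backward
# and the boolean `relu_out <= 0` mask consumed by aten.threshold_backward
# to zero gradients through ReLU. A hedged sketch of the mask alone:
#   mask = relu_out <= 0.0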
# kernel path: runs/run_shard_4/inductor_cache/ft/cft4mlnf34zpqfnf5ewqhl7tp5sqb7erbzskwdcruy33ojoi5cll.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__log_softmax_3 = async_compile.triton('triton_per_fused__log_softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 3600
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 900
x1 = (xindex // 900)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (900*r2) + (9024*x1)), rmask & xmask, other=0.0)
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hz/chzybqdix7n6nxl3ea723vdxr6yps7cxjnu3t4yyxcjzbcjkinoc.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => log, sub_1
# Graph fragment:
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_4 = async_compile.triton('triton_poi_fused__log_softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 9000)
x3 = xindex % 9000
x0 = xindex % 900
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (9024*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (900*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.log(tmp1)
tmp3 = tmp0 - tmp2
tl.store(out_ptr0 + (x4), tmp3, xmask)
''', device_str='cuda')
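# Editor's note: kernels 1, 3 and 4 jointly implement the numerically stable
# log-softmax over the channel dimension,
#   log_softmax(x) = (x - amax(x)) - log(sum(exp(x - amax(x)))),
# split into a max reduction, an exp-sum reduction and a final subtraction.
# A minimal eager sketch (hypothetical helper name):
def _reference_log_softmax(x):
    shifted = x - x.amax(dim=1, keepdim=True)
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()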
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 144000, grid=grid(144000), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 1, 30, 30), (900, 3616, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, x, log_softmax], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten._log_softmax]
triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1.run(buf1, buf3, 3600, 10, grid=grid(3600), stream=stream0)
buf2 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1), torch.int8)
buf4 = empty_strided_cuda((4, 10, 30, 30), (9024, 900, 30, 1), torch.float32)
buf7 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_pool2d, x, log_softmax], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten._log_softmax, aten.threshold_backward]
triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2.run(buf1, buf3, buf2, buf4, buf7, 36000, grid=grid(36000), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_3.run(buf4, buf5, 3600, 10, grid=grid(3600), stream=stream0)
buf6 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_4.run(buf4, buf5, buf6, 36000, grid=grid(36000), stream=stream0)
del buf4
del buf5
return (buf6, primals_1, primals_3, buf1, buf2, buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((10, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class BaseModel(nn.Module):
"""
Base class for all models
    All models must define an __init__ and a forward method; __str__ is what is shown when print(model) is called
"""
def __init__(self):
super(BaseModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self, *input):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__(
) + '\nTrainable parameters: {}'.format(params)
class myCustomModel(BaseModel):
def __init__(self, num_classes=2):
super(myCustomModel, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import logging
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 144000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3600 % 10
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1(in_ptr0,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 3600
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r3 = rindex
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x4 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3600 * r3 + 36000 * x2),
rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3600 * r3 + 36000 *
x2), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3600 * r3 + 36000 *
x2), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3600 * r3 + 36000 *
x2), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.full([1, 1], 0, tl.int32)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, float('-inf'))
tmp12 = triton_helpers.max2(tmp11, 1)[:, None]
tl.store(out_ptr0 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2(
    in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x5 = xindex // 30
x2 = xindex // 9000
x6 = xindex % 9000
x7 = xindex % 900
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x5), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x5), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x5), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x5), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x7 + 900 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp20 = tmp18 - tmp19
tmp21 = 0.0
tmp22 = tmp18 <= tmp21
tl.store(out_ptr0 + (x6 + 9088 * x2), tmp15, xmask)
tl.store(out_ptr1 + (x6 + 9024 * x2), tmp20, xmask)
tl.store(out_ptr2 + (x6 + 9088 * x2), tmp22, xmask)
@triton.jit
def triton_per_fused__log_softmax_3(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 3600
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 900
x1 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 900 * r2 + 9024 * x1), rmask & xmask,
other=0.0)
tmp1 = tl_math.exp(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + x3, tmp5, xmask)
@triton.jit
def triton_poi_fused__log_softmax_4(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 36000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 9000
x3 = xindex % 9000
x0 = xindex % 900
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 9024 * x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 900 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.log(tmp1)
tmp3 = tmp0 - tmp2
tl.store(out_ptr0 + x4, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (10, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (10,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 10, 60, 60), (36000, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144000)](buf1, primals_2,
144000, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
        buf3 = empty_strided_cuda((4, 1, 30, 30), (900, 3616, 30, 1),
            torch.float32)
triton_per_fused__log_softmax_max_pool2d_with_indices_relu_1[grid(3600)
](buf1, buf3, 3600, 10, XBLOCK=8, num_warps=2, num_stages=1)
buf2 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1),
torch.int8)
buf4 = empty_strided_cuda((4, 10, 30, 30), (9024, 900, 30, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 10, 30, 30), (9088, 900, 30, 1),
torch.bool)
triton_poi_fused__log_softmax_max_pool2d_with_indices_relu_threshold_backward_2[
grid(36000)](buf1, buf3, buf2, buf4, buf7, 36000, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = buf3
del buf3
triton_per_fused__log_softmax_3[grid(3600)](buf4, buf5, 3600, 10,
XBLOCK=32, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 10, 30, 30), (9000, 900, 30, 1),
torch.float32)
triton_poi_fused__log_softmax_4[grid(36000)](buf4, buf5, buf6,
36000, XBLOCK=256, num_warps=4, num_stages=1)
del buf4
del buf5
return buf6, primals_1, primals_3, buf1, buf2, buf6, buf7
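# Editor's note: `call` returns the log-softmax output first, followed by
# tensors saved for the backward pass (weights, input, biased conv output,
# pooling indices and the ReLU mask); the module's forward below surfaces
# only output[0].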
class BaseModel(nn.Module):
"""
Base class for all models
    All models must define an __init__ and a forward method; __str__ is what is shown when print(model) is called
"""
def __init__(self):
super(BaseModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self, *input):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__(
) + '\nTrainable parameters: {}'.format(params)
class myCustomModelNew(BaseModel):
def __init__(self, num_classes=2):
super(myCustomModelNew, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| awoloshuk/NephNet | myCustomModel | false | 6,302 | [
"MIT"
] | 1 | 562431364874fef1680069c7a5235c67b96504b8 | https://github.com/awoloshuk/NephNet/tree/562431364874fef1680069c7a5235c67b96504b8 | import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class BaseModel(nn.Module):
"""
Base class for all models
    All models must define an __init__ and a forward method; __str__ is what is shown when print(model) is called
"""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self, *input):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__(
) + '\nTrainable parameters: {}'.format(params)
class Model(BaseModel):
def __init__(self, num_classes=2):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/s7/cs7jv7spsytbq3ouvdhla2tcr7wzgoznysid6m7rapuqn7g7cc3h.py
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp6, xmask)
tl.store(out_ptr1 + (x0), tmp10, xmask)
tl.store(out_ptr2 + (x0), tmp14, xmask)
''', device_str='cuda')
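# Editor's note: a minimal sketch of the soft Dice loss the two kernels
# compute together, assuming smooth = 1.0 and a mean over the batch of 4, as
# the graph fragments above indicate (hypothetical helper name, not part of
# the generated module):
def _reference_dice_loss(pred, target, smooth=1.0):
    pred = pred.reshape(pred.shape[0], -1)
    target = target.reshape(target.shape[0], -1)
    intersection = (pred * target).sum(dim=1)
    dice = 2.0 * (intersection + smooth) / (
        pred.sum(dim=1) + target.sum(dim=1) + smooth)
    return 1.0 - dice.mean()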
# kernel path: runs/run_shard_4/inductor_cache/vq/cvqiixp4wmb73ig2cla6idbqq7i6vd5n3qmdluadrv32f52pdgw3.py
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, truediv, sum_4, truediv_1, loss], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# loss => sub
# mul_1 => mul_1
# sum_4 => sum_4
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
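# Note: taken together, the two kernels above compute the eager expression
#     1 - (2 * (intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)).sum() / n
# with smooth = 1.0 and n = 4 folded in as the constants 1.0 and 0.25.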
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, truediv, sum_4, truediv_1, loss], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, input, target):
n = input.shape[0]
input = input.view(n, -1)
target = target.view(n, -1)
intersection = input * target
loss = 1 - (2 * (intersection.sum(1) + self.smooth) / (input.sum(1) +
target.sum(1) + self.smooth)).sum() / n
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
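# Illustration only (not part of the original repo; uses the DiceLoss defined
# above). Note that `smooth` is added inside the doubled numerator,
# 2 * (I + s) / (P + T + s), rather than the more common (2 * I + s) / (P + T + s),
# so even a perfect prediction does not score exactly zero:
def _check_dice_loss_smoothing():
    torch.manual_seed(0)
    pred = (torch.rand(4, 4, 4, 4) > 0.5).float()   # binary mask, so pred * pred == pred
    loss = DiceLoss()(pred, pred)                   # identical input and target
    s = pred.view(4, -1).sum(1)                     # per-sample foreground count
    expected = 1 - (2 * (s + 1.0) / (2 * s + 1.0)).sum() / 4
    assert torch.allclose(loss, expected)
    return loss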
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: r-mask is never applied (rnumel == RBLOCK)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.sum(tmp5, 1)[:, None]
tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tl.store(out_ptr0 + x0, tmp6, xmask)
tl.store(out_ptr1 + x0, tmp10, xmask)
tl.store(out_ptr2 + x0, tmp14, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead expression left by dead-code elimination (xnumel == 1)
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: x-mask is never applied
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: r-mask is never applied
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)  # vestigial: this simplified launcher does not pass the stream explicitly
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLossNew(nn.Module):
def __init__(self, smooth=1.0):
super(DiceLossNew, self).__init__()
self.smooth = smooth
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| azxj/BRRNet | DiceLoss | false | 6,303 | [
"MIT"
] | 1 | 274068efd5453f2c1fb07bfaad448d048b9c793b | https://github.com/azxj/BRRNet/tree/274068efd5453f2c1fb07bfaad448d048b9c793b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, smooth=1.0):
super().__init__()
self.smooth = smooth
def forward(self, input, target):
n = input.shape[0]
input = input.view(n, -1)
target = target.view(n, -1)
intersection = input * target
loss = 1 - (2 * (intersection.sum(1) + self.smooth) / (input.sum(1) +
target.sum(1) + self.smooth)).sum() / n
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
CombinedTargetMSELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fha6hwigp5qdkirvgzpdtvtztnza4ys4zevam5i2owamrhkdzx.py
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
# Source node to ATen node mapping:
# heatmap_gt_1 => mul_1
# heatmap_pred_1 => mul
# loss => add
# loss_1 => add_1
# loss_2 => add_2
# mse_loss => mean, pow_1, sub
# mse_loss_1 => mean_1, pow_2, sub_1
# mse_loss_2 => mean_2, pow_3, sub_2
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %select), kwargs = {})
# %mul_1 : [num_users=5] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, %select_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, 0.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_1, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %squeeze_5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, %mul_7), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_3,), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean_2, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_8), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1.0), kwargs = {})
triton_per_fused_add_div_mse_loss_mul_0 = async_compile.triton('triton_per_fused_add_div_mse_loss_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mse_loss_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp42, None)
''', device_str='cuda')
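# Note: for the (4, 4) benchmark inputs num_joints == 4 // 3 == 1, so this
# single fused program evaluates
#     0.5 * MSE(w * hm_p,          w * hm_g)
#   + 0.5 * MSE(w * hm_g * offx_p, w * hm_g * offx_g)
#   + 0.5 * MSE(w * hm_g * offy_p, w * hm_g * offy_g)
# with w = target_weight[:, 0]; the trailing multiplications by 1.0 fold in
# loss / num_joints * loss_weight.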
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [heatmap_pred_1, heatmap_gt_1, mse_loss, mul_2, loss, mul_3, mul_4, mse_loss_1, mul_5, loss_1, mul_6, mul_7, mse_loss_2, mul_8, loss_2, truediv, mul_9], Original ATen: [aten.mul, aten.mse_loss, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mse_loss_mul_0.run(buf3, arg0_1, arg2_1, arg1_1, 1, 4, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CombinedTargetMSELoss(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(
1, 1)
heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1
)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred * target_weight[:, idx]
heatmap_gt = heatmap_gt * target_weight[:, idx]
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints * self.loss_weight
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'use_target_weight': 4}]
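# Illustration only (not part of the original repo; uses the class defined
# above). Channels are consumed in groups of three per joint --
# [heatmap, offset_x, offset_y] -- so 3 * k channels encode k joints; the
# shapes below are arbitrary:
def _demo_combined_target():
    loss_fn = CombinedTargetMSELoss(use_target_weight=True)
    n, k, h, w = 2, 2, 8, 8                     # batch, joints, height, width
    output = torch.rand(n, 3 * k, h, w)
    target = torch.rand(n, 3 * k, h, w)
    target_weight = torch.ones(n, k, 1)         # per-joint weights, broadcast over pixels
    return loss_fn(output, target, target_weight)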
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mse_loss_mul_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead expression left by dead-code elimination (xnumel == 1)
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: x-mask is never applied
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: r-mask is never applied
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp11 = tmp4 * tmp10
tmp13 = tmp4 * tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp20 = tmp4 * tmp19
tmp22 = tmp4 * tmp21
tmp23 = tmp20 - tmp22
tmp24 = tmp23 * tmp23
tmp25 = tl.broadcast_to(tmp24, [XBLOCK, RBLOCK])
tmp27 = tl.sum(tmp25, 1)[:, None]
tmp28 = 4.0
tmp29 = tmp9 / tmp28
tmp30 = 0.5
tmp31 = tmp29 * tmp30
tmp32 = 0.0
tmp33 = tmp31 + tmp32
tmp34 = tmp18 / tmp28
tmp35 = tmp34 * tmp30
tmp36 = tmp33 + tmp35
tmp37 = tmp27 / tmp28
tmp38 = tmp37 * tmp30
tmp39 = tmp36 + tmp38
tmp40 = 1.0
tmp41 = tmp39 * tmp40
tmp42 = tmp41 * tmp40
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp42, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
        get_raw_stream(0)  # vestigial: this simplified launcher does not pass the stream explicitly
triton_per_fused_add_div_mse_loss_mul_0[grid(1)](buf3, arg0_1,
arg2_1, arg1_1, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf3,
class CombinedTargetMSELossNew(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| atoaiari/mmpose | CombinedTargetMSELoss | false | 6,304 | [
"Apache-2.0"
] | 1 | 256a9117767008e8c33b4038a346aca12233e300 | https://github.com/atoaiari/mmpose/tree/256a9117767008e8c33b4038a346aca12233e300 | import torch
import torch.nn as nn
class Model(nn.Module):
"""MSE loss for combined target.
CombinedTarget: The combination of classification target
(response map) and regression target (offset map).
Paper ref: Huang et al. The Devil is in the Details: Delving into
Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Args:
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self, use_target_weight, loss_weight=1.0):
super().__init__()
self.criterion = nn.MSELoss(reduction='mean')
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def forward(self, output, target, target_weight):
batch_size = output.size(0)
num_channels = output.size(1)
heatmaps_pred = output.reshape((batch_size, num_channels, -1)).split(
1, 1)
heatmaps_gt = target.reshape((batch_size, num_channels, -1)).split(1, 1
)
loss = 0.0
num_joints = num_channels // 3
for idx in range(num_joints):
heatmap_pred = heatmaps_pred[idx * 3].squeeze()
heatmap_gt = heatmaps_gt[idx * 3].squeeze()
offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()
offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()
offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()
offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()
if self.use_target_weight:
heatmap_pred = heatmap_pred * target_weight[:, idx]
heatmap_gt = heatmap_gt * target_weight[:, idx]
loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,
heatmap_gt * offset_x_gt)
loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,
heatmap_gt * offset_y_gt)
return loss / num_joints * self.loss_weight
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4]
|
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nl/cnlmc4rotaarrtvfztzaiegrcnaxsa6ub4pi24tghi44y22jtozp.py
# Topologically Sorted Source Nodes: [mul, sum_1, sum_2, pow_1, sum_4, sum_5, pow_2, sum_7, sum_8], Original ATen: [aten.mul, aten.sum, aten.pow]
# Source node to ATen node mapping:
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sum_1 => sum_1
# sum_2 => sum_2
# sum_4 => sum_4
# sum_5 => sum_5
# sum_7 => sum_7
# sum_8 => sum_8
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_1, [1]), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%squeeze, 2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_4, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1]), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_7, [1]), kwargs = {})
triton_poi_fused_mul_pow_sum_0 = async_compile.triton('triton_poi_fused_mul_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp15 = tl.load(in_ptr0 + (4 + x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr1 + (4 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (20 + x0 + (64*x1)), xmask)
tmp19 = tl.load(in_ptr1 + (20 + x0 + (64*x1)), xmask)
tmp22 = tl.load(in_ptr0 + (36 + x0 + (64*x1)), xmask)
tmp23 = tl.load(in_ptr1 + (36 + x0 + (64*x1)), xmask)
tmp26 = tl.load(in_ptr0 + (52 + x0 + (64*x1)), xmask)
tmp27 = tl.load(in_ptr1 + (52 + x0 + (64*x1)), xmask)
tmp31 = tl.load(in_ptr0 + (8 + x0 + (64*x1)), xmask)
tmp32 = tl.load(in_ptr1 + (8 + x0 + (64*x1)), xmask)
tmp34 = tl.load(in_ptr0 + (24 + x0 + (64*x1)), xmask)
tmp35 = tl.load(in_ptr1 + (24 + x0 + (64*x1)), xmask)
tmp38 = tl.load(in_ptr0 + (40 + x0 + (64*x1)), xmask)
tmp39 = tl.load(in_ptr1 + (40 + x0 + (64*x1)), xmask)
tmp42 = tl.load(in_ptr0 + (56 + x0 + (64*x1)), xmask)
tmp43 = tl.load(in_ptr1 + (56 + x0 + (64*x1)), xmask)
tmp47 = tl.load(in_ptr0 + (12 + x0 + (64*x1)), xmask)
tmp48 = tl.load(in_ptr1 + (12 + x0 + (64*x1)), xmask)
tmp50 = tl.load(in_ptr0 + (28 + x0 + (64*x1)), xmask)
tmp51 = tl.load(in_ptr1 + (28 + x0 + (64*x1)), xmask)
tmp54 = tl.load(in_ptr0 + (44 + x0 + (64*x1)), xmask)
tmp55 = tl.load(in_ptr1 + (44 + x0 + (64*x1)), xmask)
tmp58 = tl.load(in_ptr0 + (60 + x0 + (64*x1)), xmask)
tmp59 = tl.load(in_ptr1 + (60 + x0 + (64*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp33 = tmp31 * tmp32
tmp36 = tmp34 * tmp35
tmp37 = tmp33 + tmp36
tmp40 = tmp38 * tmp39
tmp41 = tmp37 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 + tmp45
tmp49 = tmp47 * tmp48
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp56 = tmp54 * tmp55
tmp57 = tmp53 + tmp56
tmp60 = tmp58 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp46 + tmp61
tmp63 = tmp0 * tmp0
tmp64 = tmp3 * tmp3
tmp65 = tmp63 + tmp64
tmp66 = tmp7 * tmp7
tmp67 = tmp65 + tmp66
tmp68 = tmp11 * tmp11
tmp69 = tmp67 + tmp68
tmp70 = tmp15 * tmp15
tmp71 = tmp18 * tmp18
tmp72 = tmp70 + tmp71
tmp73 = tmp22 * tmp22
tmp74 = tmp72 + tmp73
tmp75 = tmp26 * tmp26
tmp76 = tmp74 + tmp75
tmp77 = tmp69 + tmp76
tmp78 = tmp31 * tmp31
tmp79 = tmp34 * tmp34
tmp80 = tmp78 + tmp79
tmp81 = tmp38 * tmp38
tmp82 = tmp80 + tmp81
tmp83 = tmp42 * tmp42
tmp84 = tmp82 + tmp83
tmp85 = tmp77 + tmp84
tmp86 = tmp47 * tmp47
tmp87 = tmp50 * tmp50
tmp88 = tmp86 + tmp87
tmp89 = tmp54 * tmp54
tmp90 = tmp88 + tmp89
tmp91 = tmp58 * tmp58
tmp92 = tmp90 + tmp91
tmp93 = tmp85 + tmp92
tmp94 = tmp1 * tmp1
tmp95 = tmp4 * tmp4
tmp96 = tmp94 + tmp95
tmp97 = tmp8 * tmp8
tmp98 = tmp96 + tmp97
tmp99 = tmp12 * tmp12
tmp100 = tmp98 + tmp99
tmp101 = tmp16 * tmp16
tmp102 = tmp19 * tmp19
tmp103 = tmp101 + tmp102
tmp104 = tmp23 * tmp23
tmp105 = tmp103 + tmp104
tmp106 = tmp27 * tmp27
tmp107 = tmp105 + tmp106
tmp108 = tmp100 + tmp107
tmp109 = tmp32 * tmp32
tmp110 = tmp35 * tmp35
tmp111 = tmp109 + tmp110
tmp112 = tmp39 * tmp39
tmp113 = tmp111 + tmp112
tmp114 = tmp43 * tmp43
tmp115 = tmp113 + tmp114
tmp116 = tmp108 + tmp115
tmp117 = tmp48 * tmp48
tmp118 = tmp51 * tmp51
tmp119 = tmp117 + tmp118
tmp120 = tmp55 * tmp55
tmp121 = tmp119 + tmp120
tmp122 = tmp59 * tmp59
tmp123 = tmp121 + tmp122
tmp124 = tmp116 + tmp123
tl.store(out_ptr0 + (x2), tmp62, xmask)
tl.store(out_ptr1 + (x2), tmp93, xmask)
tl.store(out_ptr2 + (x2), tmp124, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bq/cbqgfdsdskelqjbw4r3hudlpfrzmjhr2qjb4hibcjvwgwcovvolq.py
# Topologically Sorted Source Nodes: [sum_3, mul_1, sum_6, sum_9, add, add_1, dice, sub, mean], Original ATen: [aten.sum, aten.mul, aten.add, aten.div, aten.rsub, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# dice => div
# mean => mean
# mul_1 => mul_1
# sub => sub
# sum_3 => sum_3
# sum_6 => sum_6
# sum_9 => sum_9
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_2, [1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 2), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_5, [1]), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sum_8, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_6, %sum_9), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-05), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub,), kwargs = {})
triton_per_fused_add_div_mean_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mean_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (4*r0), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp15 + tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = tmp8 / tmp25
tmp27 = 1.0
tmp28 = tmp27 - tmp26
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp33, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sum_1, sum_2, pow_1, sum_4, sum_5, pow_2, sum_7, sum_8], Original ATen: [aten.mul, aten.sum, aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_pow_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [sum_3, mul_1, sum_6, sum_9, add, add_1, dice, sub, mean], Original ATen: [aten.sum, aten.mul, aten.add, aten.div, aten.rsub, aten.mean]
triton_per_fused_add_div_mean_mul_rsub_sum_1.run(buf5, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
def forward(self, pred, target):
pred = pred.squeeze(dim=1)
dice = 2 * (pred * target).sum(dim=1).sum(dim=1).sum(dim=1) / (pred
.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) + target.pow(2).sum(
dim=1).sum(dim=1).sum(dim=1) + 1e-05)
return (1 - dice).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
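# Illustration only (not part of the original repo; uses the DiceLoss defined
# above). The squeeze(dim=1) followed by three chained sums suggests 5-D
# volumetric predictions paired with 4-D targets; 4-D inputs whose dim 1 is
# not singleton (as in get_inputs) also work, since squeeze is then a no-op:
def _demo_volumetric_dice():
    pred = torch.rand(2, 1, 4, 4, 4)    # (N, 1, D, H, W): singleton channel axis
    target = torch.rand(2, 4, 4, 4)     # (N, D, H, W)
    return DiceLoss()(pred, target)     # squeeze(dim=1) aligns the two shapes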
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_pow_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp11 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp15 = tl.load(in_ptr0 + (4 + x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (4 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (20 + x0 + 64 * x1), xmask)
tmp19 = tl.load(in_ptr1 + (20 + x0 + 64 * x1), xmask)
tmp22 = tl.load(in_ptr0 + (36 + x0 + 64 * x1), xmask)
tmp23 = tl.load(in_ptr1 + (36 + x0 + 64 * x1), xmask)
tmp26 = tl.load(in_ptr0 + (52 + x0 + 64 * x1), xmask)
tmp27 = tl.load(in_ptr1 + (52 + x0 + 64 * x1), xmask)
tmp31 = tl.load(in_ptr0 + (8 + x0 + 64 * x1), xmask)
tmp32 = tl.load(in_ptr1 + (8 + x0 + 64 * x1), xmask)
tmp34 = tl.load(in_ptr0 + (24 + x0 + 64 * x1), xmask)
tmp35 = tl.load(in_ptr1 + (24 + x0 + 64 * x1), xmask)
tmp38 = tl.load(in_ptr0 + (40 + x0 + 64 * x1), xmask)
tmp39 = tl.load(in_ptr1 + (40 + x0 + 64 * x1), xmask)
tmp42 = tl.load(in_ptr0 + (56 + x0 + 64 * x1), xmask)
tmp43 = tl.load(in_ptr1 + (56 + x0 + 64 * x1), xmask)
tmp47 = tl.load(in_ptr0 + (12 + x0 + 64 * x1), xmask)
tmp48 = tl.load(in_ptr1 + (12 + x0 + 64 * x1), xmask)
tmp50 = tl.load(in_ptr0 + (28 + x0 + 64 * x1), xmask)
tmp51 = tl.load(in_ptr1 + (28 + x0 + 64 * x1), xmask)
tmp54 = tl.load(in_ptr0 + (44 + x0 + 64 * x1), xmask)
tmp55 = tl.load(in_ptr1 + (44 + x0 + 64 * x1), xmask)
tmp58 = tl.load(in_ptr0 + (60 + x0 + 64 * x1), xmask)
tmp59 = tl.load(in_ptr1 + (60 + x0 + 64 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp33 = tmp31 * tmp32
tmp36 = tmp34 * tmp35
tmp37 = tmp33 + tmp36
tmp40 = tmp38 * tmp39
tmp41 = tmp37 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp41 + tmp44
tmp46 = tmp30 + tmp45
tmp49 = tmp47 * tmp48
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tmp56 = tmp54 * tmp55
tmp57 = tmp53 + tmp56
tmp60 = tmp58 * tmp59
tmp61 = tmp57 + tmp60
tmp62 = tmp46 + tmp61
tmp63 = tmp0 * tmp0
tmp64 = tmp3 * tmp3
tmp65 = tmp63 + tmp64
tmp66 = tmp7 * tmp7
tmp67 = tmp65 + tmp66
tmp68 = tmp11 * tmp11
tmp69 = tmp67 + tmp68
tmp70 = tmp15 * tmp15
tmp71 = tmp18 * tmp18
tmp72 = tmp70 + tmp71
tmp73 = tmp22 * tmp22
tmp74 = tmp72 + tmp73
tmp75 = tmp26 * tmp26
tmp76 = tmp74 + tmp75
tmp77 = tmp69 + tmp76
tmp78 = tmp31 * tmp31
tmp79 = tmp34 * tmp34
tmp80 = tmp78 + tmp79
tmp81 = tmp38 * tmp38
tmp82 = tmp80 + tmp81
tmp83 = tmp42 * tmp42
tmp84 = tmp82 + tmp83
tmp85 = tmp77 + tmp84
tmp86 = tmp47 * tmp47
tmp87 = tmp50 * tmp50
tmp88 = tmp86 + tmp87
tmp89 = tmp54 * tmp54
tmp90 = tmp88 + tmp89
tmp91 = tmp58 * tmp58
tmp92 = tmp90 + tmp91
tmp93 = tmp85 + tmp92
tmp94 = tmp1 * tmp1
tmp95 = tmp4 * tmp4
tmp96 = tmp94 + tmp95
tmp97 = tmp8 * tmp8
tmp98 = tmp96 + tmp97
tmp99 = tmp12 * tmp12
tmp100 = tmp98 + tmp99
tmp101 = tmp16 * tmp16
tmp102 = tmp19 * tmp19
tmp103 = tmp101 + tmp102
tmp104 = tmp23 * tmp23
tmp105 = tmp103 + tmp104
tmp106 = tmp27 * tmp27
tmp107 = tmp105 + tmp106
tmp108 = tmp100 + tmp107
tmp109 = tmp32 * tmp32
tmp110 = tmp35 * tmp35
tmp111 = tmp109 + tmp110
tmp112 = tmp39 * tmp39
tmp113 = tmp111 + tmp112
tmp114 = tmp43 * tmp43
tmp115 = tmp113 + tmp114
tmp116 = tmp108 + tmp115
tmp117 = tmp48 * tmp48
tmp118 = tmp51 * tmp51
tmp119 = tmp117 + tmp118
tmp120 = tmp55 * tmp55
tmp121 = tmp119 + tmp120
tmp122 = tmp59 * tmp59
tmp123 = tmp121 + tmp122
tmp124 = tmp116 + tmp123
tl.store(out_ptr0 + x2, tmp62, xmask)
tl.store(out_ptr1 + x2, tmp93, xmask)
tl.store(out_ptr2 + x2, tmp124, xmask)
@triton.jit
def triton_per_fused_add_div_mean_mul_rsub_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
    xoffset + tl.arange(0, XBLOCK)[:, None]  # dead expression left by dead-code elimination (xnumel == 1)
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: x-mask is never applied
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead expression: r-mask is never applied
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 2.0
tmp8 = tmp6 * tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp18 = tmp16 + tmp17
tmp20 = tmp18 + tmp19
tmp22 = tmp20 + tmp21
tmp23 = tmp15 + tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = tmp8 / tmp25
tmp27 = 1.0
tmp28 = tmp27 - tmp26
tmp29 = tl.broadcast_to(tmp28, [XBLOCK, RBLOCK])
tmp31 = tl.sum(tmp29, 1)[:, None]
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp33, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)  # vestigial: this simplified launcher does not pass the stream explicitly
triton_poi_fused_mul_pow_sum_0[grid(16)](arg0_1, arg1_1, buf0, buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf4 = empty_strided_cuda((), (), torch.float32)
buf5 = buf4
del buf4
triton_per_fused_add_div_mean_mul_rsub_sum_1[grid(1)](buf5, buf0,
buf1, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf5,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ayanglab/HDL | DiceLoss | false | 6,305 | [
"Apache-2.0"
] | 1 | 5ff778d713331671ffa85e9fb63378d8c0a57769 | https://github.com/ayanglab/HDL/tree/5ff778d713331671ffa85e9fb63378d8c0a57769 | import torch
import torch.nn as nn
import torch.utils.data
import torch
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, pred, target):
pred = pred.squeeze(dim=1)
dice = 2 * (pred * target).sum(dim=1).sum(dim=1).sum(dim=1) / (pred
.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) + target.pow(2).sum(
dim=1).sum(dim=1).sum(dim=1) + 1e-05)
return (1 - dice).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SigmoidBCELoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/5u/c5u7c3xfjaazdtxwasop4c2d3vlwaagqtikdsnpoz56tcxjbvfvx.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, log1p, mean, minimum, mul, neg, sub, sub_1, sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %squeeze), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %squeeze), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%squeeze,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None)
''', device_str='cuda')
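# Note: the kernel above evaluates the numerically stable identity
#     (1 - t) * x - (min(0, x) - log1p(exp(-|x|)))
#   = max(x, 0) - t * x + log1p(exp(-|x|)),
# i.e. binary cross-entropy with logits without exponentiating large |x|.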
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SigmoidBCELoss(nn.BCEWithLogitsLoss):
def __init__(self, **kwargs):
super(SigmoidBCELoss, self).__init__(**kwargs)
def forward(self, input, target):
input = input.squeeze(1)
target = target.float()
return super(SigmoidBCELoss, self).forward(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
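# Usage sketch (illustrative only, not part of the original file): the module
# squeezes a singleton channel dim from the logits before delegating to
# nn.BCEWithLogitsLoss, so logits of shape (N, 1, H, W) pair with
# float-castable targets of shape (N, H, W):
#   criterion = SigmoidBCELoss()
#   loss = criterion(torch.randn(2, 1, 8, 8), torch.randint(0, 2, (2, 8, 8)))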
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class SigmoidBCELossNew(nn.BCEWithLogitsLoss):
def __init__(self, **kwargs):
super(SigmoidBCELossNew, self).__init__(**kwargs)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| azxj/BRRNet | SigmoidBCELoss | false | 6,306 | [
"MIT"
] | 1 | 274068efd5453f2c1fb07bfaad448d048b9c793b | https://github.com/azxj/BRRNet/tree/274068efd5453f2c1fb07bfaad448d048b9c793b | import torch
import torch.nn as nn
class Model(nn.BCEWithLogitsLoss):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, input, target):
input = input.squeeze(1)
target = target.float()
        return super(Model, self).forward(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SqueezeExcitation | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# scale => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
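# Shape note (illustrative, assuming the compiled 4x4x4x4 input): each of the
# 16 (batch, channel) planes is reduced over its 16 spatial positions, so this
# persistent-reduction kernel realizes F.adaptive_avg_pool2d(x, 1),
# equivalently x.mean(dim=(-2, -1), keepdim=True), for the squeeze step of
# the SE block.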
# kernel path: runs/run_shard_4/inductor_cache/sh/cshbgrlhlsuuebcz7jbje66sr2nkeng6kilqpqluwrr5ru2afxle.py
# Topologically Sorted Source Nodes: [scale_1, scale_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# scale_1 => convolution
# scale_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/22/c22ve3r4y5fft3g2xdkwnomz747tvayj57gf6zszmac5hjy65wgt.py
# Topologically Sorted Source Nodes: [scale_3, scale_4, mul], Original ATen: [aten.convolution, aten.hardsigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# scale_3 => convolution_1
# scale_4 => add, clamp_max, clamp_min, div
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_1), kwargs = {})
triton_poi_fused_convolution_hardsigmoid_mul_2 = async_compile.triton('triton_poi_fused_convolution_hardsigmoid_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardsigmoid_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 16)
x1 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (x4), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 6.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x4), tmp12, xmask)
''', device_str='cuda')
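# Math note (an illustrative sketch, not generated code): the kernel fuses the
# fc2 bias add with hardsigmoid and the final gating, computing
#   out = clamp(conv + bias + 3, 0, 6) / 6 * input
# which matches F.hardsigmoid(y) = clamp(y + 3, 0, 6) / 6; tmp9 is the
# constant 1/6. An eager twin, with conv_out/bias/x standing in for the
# kernel's three inputs, would be:
#   y = conv_out + bias.view(1, -1, 1, 1)
#   out = torch.clamp(y + 3.0, min=0.0, max=6.0) / 6.0 * x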
# kernel path: runs/run_shard_4/inductor_cache/hv/chvrs33e2wyvaktcs4kouk236tzwn7sho4sgx2yjf7sjc72okw6d.py
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution, aten.hardsigmoid_backward]
# Source node to ATen node mapping:
# scale_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, -3.0), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%convolution_1, 3.0), kwargs = {})
# %bitwise_and : [num_users=1] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %lt), kwargs = {})
triton_poi_fused_convolution_hardsigmoid_backward_3 = async_compile.triton('triton_poi_fused_convolution_hardsigmoid_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardsigmoid_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
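# Gradient note (illustrative): hardsigmoid has derivative 1/6 on (-3, 3) and
# 0 elsewhere, so the backward pass only needs the boolean mask
# (y > -3) & (y < 3) of the pre-activation y = conv + bias, which this kernel
# materializes; the masked gradients are scaled by 1/6 later in autograd.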
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [scale_1, scale_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 32, grid=grid(32), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale_3, scale_4, mul], Original ATen: [aten.convolution, aten.hardsigmoid, aten.mul]
triton_poi_fused_convolution_hardsigmoid_mul_2.run(buf4, primals_5, primals_1, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [scale_3], Original ATen: [aten.convolution, aten.hardsigmoid_backward]
triton_poi_fused_convolution_hardsigmoid_backward_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.nn.functional as F
from torch import nn
from torchvision.models.mobilenetv2 import _make_divisible
class SqueezeExcitation(nn.Module):
def __init__(self, input_channels: 'int', squeeze_factor: 'int'=4):
super().__init__()
squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
def _scale(self, input: 'Tensor', inplace: 'bool') ->Tensor:
scale = F.adaptive_avg_pool2d(input, 1)
scale = self.fc1(scale)
scale = self.relu(scale)
scale = self.fc2(scale)
return F.hardsigmoid(scale, inplace=inplace)
def forward(self, input: 'Tensor') ->Tensor:
scale = self._scale(input, True)
return scale * input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import Tensor
import torch.nn.functional as F
from torch import nn
from torchvision.models.mobilenetv2 import _make_divisible
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 16
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x4, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 6.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_hardsigmoid_mul_2[grid(256)](buf4,
primals_5, primals_1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_hardsigmoid_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
class SqueezeExcitationNew(nn.Module):
def __init__(self, input_channels: 'int', squeeze_factor: 'int'=4):
super().__init__()
squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
def _scale(self, input: 'Tensor', inplace: 'bool') ->Tensor:
scale = F.adaptive_avg_pool2d(input, 1)
scale = self.fc1(scale)
scale = self.relu(scale)
scale = self.fc2(scale)
return F.hardsigmoid(scale, inplace=inplace)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
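# Usage sketch (illustrative only, assuming a CUDA device and the compiled
# 4-channel, 4x4 input shape):
#   m = SqueezeExcitationNew(4).cuda()
#   y = m(torch.rand(4, 4, 4, 4, device='cuda'))   # same shape out, channel-gated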
| ayrna/ordinal-cnn-ecoc | SqueezeExcitation | false | 6,307 | [
"BSD-3-Clause"
] | 1 | 2b7909d036612727a45a174c891c4e749c3b60c4 | https://github.com/ayrna/ordinal-cnn-ecoc/tree/2b7909d036612727a45a174c891c4e749c3b60c4 | import torch
from torch import Tensor
import torch.nn.functional as F
from torch import nn
from torchvision.models.mobilenetv2 import _make_divisible
class Model(nn.Module):
def __init__(self, input_channels: 'int', squeeze_factor: 'int'=4):
super().__init__()
squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1)
def _scale(self, input: 'Tensor', inplace: 'bool') ->Tensor:
scale = F.adaptive_avg_pool2d(input, 1)
scale = self.fc1(scale)
scale = self.relu(scale)
scale = self.fc2(scale)
return F.hardsigmoid(scale, inplace=inplace)
def forward(self, input: 'Tensor') ->Tensor:
scale = self._scale(input, True)
return scale * input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
templateModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 1176)
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask)
''', device_str='cuda')
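# Index note (illustrative): besides each 2x2 max, the kernel stores an int8
# code in {0, 1, 2, 3} naming the winning position of the window in row-major
# order. This plays the role of the indices from
# F.max_pool2d(..., return_indices=True) for the pooling backward, though the
# eager op encodes flat input offsets rather than 0-3 window codes.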
# kernel path: runs/run_shard_4/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jn/cjnqv3sgcv5x2iz7ij5zdad6ofabcnonrlksgsxu2ob7n274gz6b.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6m/c6m6u2ctjb4r4ra3sizrwezzkzegfp2ombflmfg3dwjfci2pen7h.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
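# Shape trace (illustrative, for the compiled 4x3x32x32 input): conv1 (5x5,
# no padding) gives 28x28, pool -> 14x14, conv2 -> 10x10, pool -> 5x5, so the
# flatten is 16*5*5 = 400 features. Hence the element counts baked into the
# kernels above: 18816 = 4*6*28*28, 4704 = 4*6*14*14, 6400 = 4*16*10*10,
# 1600 = 4*16*5*5, 480 = 4*120, and 336 = 4*84.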
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84, ), (1, ))
assert_size_stride(primals_10, (43, 84), (84, 1))
assert_size_stride(primals_11, (43, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 480, grid=grid(480), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 336, grid=grid(336), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 43), (43, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (84, 43), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((43, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((43, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class BaseModel(nn.Module):
"""
Base class for all models
All models require an initialization and forward method, and __str__ is what is shown when print(model) is called
"""
def __init__(self):
super(BaseModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self, *input):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
Model prints with number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__(
) + '\nTrainable parameters: {}'.format(params)
class templateModel(BaseModel):
def __init__(self):
super(templateModel, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 43)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
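# Usage sketch (illustrative only, not part of the original file): a
# LeNet-style 43-way classifier over 3x32x32 images.
#   model = templateModel()
#   logits = model(torch.rand(4, 3, 32, 32))   # -> shape (4, 43)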
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import logging
import numpy as np
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 120
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (43, 84), (84, 1))
assert_size_stride(primals_11, (43,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=256, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 400), (400, 1), 0),
reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(480)](buf9, primals_7, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (120, 84), (1,
120), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(336)](buf11, primals_9, 336, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 43), (43, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (84, 43), (1, 84), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf9, buf11,
primals_10, primals_8, primals_6)
class BaseModel(nn.Module):
"""
    Base class for all models.
    Every model must implement __init__ and forward; __str__ shows the number
    of trainable parameters when print(model) is called.
"""
def __init__(self):
super(BaseModel, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self, *input):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
        Prints the model along with its number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__(
) + '\nTrainable parameters: {}'.format(params)
class templateModelNew(BaseModel):
def __init__(self):
super(templateModelNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 43)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| awoloshuk/NephNet | templateModel | false | 6,308 | ["MIT"] | 1 | 562431364874fef1680069c7a5235c67b96504b8 | https://github.com/awoloshuk/NephNet/tree/562431364874fef1680069c7a5235c67b96504b8 | import logging
import torch
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class BaseModel(nn.Module):
"""
    Base class for all models.
    Every model must implement __init__ and forward; __str__ shows the number
    of trainable parameters when print(model) is called.
"""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def forward(self, *input):
"""
Forward pass logic
:return: Model output
"""
raise NotImplementedError
def __str__(self):
"""
        Prints the model along with its number of trainable parameters
"""
model_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return super(BaseModel, self).__str__(
) + '\nTrainable parameters: {}'.format(params)
class Model(BaseModel):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 43)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return []
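# Hedged usage sketch (not from the original repo): instantiate the
# LeNet-style classifier above and run one forward pass on a random batch.
if __name__ == '__main__':
    model = Model()
    logits = model(torch.rand(4, 3, 32, 32))
    print(logits.shape)  # torch.Size([4, 43]) -- 43 output classes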
|
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wx/cwxwvlntewdrqi2r4caciy5ht4jdvafnhtiqncr4lo4aegcb4imz.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_2, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
del arg2_1
return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention --baseline version"""
def __init__(self, dropout=0.3):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
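# Hedged usage sketch (assumed, not from the original repo): note that,
# despite the class name, no 1/sqrt(d_k) scaling is applied to the scores.
if __name__ == '__main__':
    layer = ScaledDotProductAttention(dropout=0.0)
    out, attn = layer(*get_inputs())
    print(out.shape, attn.shape)  # both torch.Size([4, 4, 4, 4])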
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
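# Softmax pass 1: subtract the per-row max (rows of length 4) and
# exponentiate -- the numerically stable half of softmax(attn, dim=-1).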
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
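# Softmax pass 2: divide each exponential by its row sum to finish the
# normalization started in the kernel above.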
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
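# Mirrors the eager forward: bmm(q, k^T) -> two-pass softmax -> bmm(attn, v);
# dropout does not appear in this compiled (inference) graph.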
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 1, 4), 0),
out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf3
)
del arg2_1
return reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf2
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention --baseline version"""
def __init__(self, dropout=0.3):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0], output[1]
| ayyyq/T-LSTM | ScaledDotProductAttention | false | 6,309 | ["MIT"] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
""" Scaled Dot-Product Attention --baseline version"""
def __init__(self, dropout=0.3):
super().__init__()
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
attn = torch.matmul(q, k.transpose(2, 3))
if mask is not None:
attn = attn.masked_fill(mask == 0, -1000000000.0)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
SentenceMatrixLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qz/cqz2secr535r76gqduuhmyhjt4aplvu6fvum4xsh5e3ep5fhn6kv.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %expand_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = (xindex // 32)
x1 = (xindex // 8) % 4
x3 = (xindex // 128)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*x1) + (16*x3) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mf/cmffxi4mvuzp34r5n7jtektcfuii5ruvm4ohtlw23b3k2hkyj3uv.py
# Topologically Sorted Source Nodes: [mul, mul_1, A_esm, A_mask, eq, masked_fill], Original ATen: [aten.mul, aten.add, aten.eq, aten.masked_fill]
# Source node to ATen node mapping:
# A_esm => add
# A_mask => mul_2
# eq => eq
# masked_fill => full_default, where
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 0.6), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, 0.4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand_2, %expand_3), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%mul_2, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 9.999999717180685e-10), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %add), kwargs = {})
triton_poi_fused_add_eq_masked_fill_mul_1 = async_compile.triton('triton_poi_fused_add_eq_masked_fill_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_eq_masked_fill_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_eq_masked_fill_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16) % 4
x4 = (xindex // 4) % 16
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x6), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 == tmp3
tmp6 = tl.sigmoid(tmp5)
tmp7 = 0.6
tmp8 = tmp6 * tmp7
tmp10 = 0.4
tmp11 = tmp9 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = 9.999999717180685e-10
tmp14 = tl.where(tmp4, tmp13, tmp12)
tl.store(out_ptr0 + (x6), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 8), (8, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, A_esm, A_mask, eq, masked_fill], Original ATen: [aten.mul, aten.add, aten.eq, aten.masked_fill]
triton_poi_fused_add_eq_masked_fill_mul_1.run(primals_5, buf2, primals_4, buf3, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf3, primals_5, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SentenceMatrixLayer(nn.Module):
def __init__(self, in_size, out_size=1, p_Asem=0.6):
super(SentenceMatrixLayer, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.p_Asem = p_Asem
self.linear = nn.Linear(in_size * 2, out_size)
def forward(self, x, adj, mask):
seq_len = x.shape[1]
xi = x.unsqueeze(2).expand(-1, -1, seq_len, -1)
xj = x.unsqueeze(1).expand(-1, seq_len, -1, -1)
xij = torch.sigmoid(self.linear(torch.cat((xi, xj), dim=-1))).squeeze(
-1)
A_esm = self.p_Asem * xij + (1 - self.p_Asem) * adj
assert mask.shape[1] == seq_len, 'seq_len inconsistent'
mask_i = mask.unsqueeze(1).expand(-1, seq_len, -1)
mask_j = mask.unsqueeze(2).expand(-1, -1, seq_len)
A_mask = mask_i * mask_j
return A_esm.masked_fill(A_mask == 0, 1e-09)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4])
]
def get_init_inputs():
return [[], {'in_size': 4}]
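# Hedged usage sketch (assumed): mixes a learned pairwise similarity with the
# given adjacency (p_Asem = 0.6 by default) and masks padded positions.
if __name__ == '__main__':
    layer = SentenceMatrixLayer(in_size=4)
    A = layer(*get_inputs())
    print(A.shape)  # torch.Size([4, 4, 4, 4])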
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
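# Builds the concatenated pair features cat((xi, xj), dim=-1): the first 4
# lanes of each row come from the broadcast xi expansion, the last 4 from xj.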
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = xindex // 32
x1 = xindex // 8 % 4
x3 = xindex // 128
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * x1 + 16 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x5, tmp10, xmask)
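# Fuses 0.6 * sigmoid(linear) + 0.4 * adj with the masked_fill: positions
# where mask_i * mask_j == 0 are overwritten with 1e-09 (float32-rounded
# to 9.999999717180685e-10 below).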
@triton.jit
def triton_poi_fused_add_eq_masked_fill_mul_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16 % 4
x4 = xindex // 4 % 16
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x6, xmask)
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 == tmp3
tmp6 = tl.sigmoid(tmp5)
tmp7 = 0.6
tmp8 = tmp6 * tmp7
tmp10 = 0.4
tmp11 = tmp9 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = 9.999999717180685e-10
tmp14 = tl.where(tmp4, tmp13, tmp12)
tl.store(out_ptr0 + x6, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 8), (8, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_2, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_eq_masked_fill_mul_1[grid(256)](primals_5,
buf2, primals_4, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_4
return buf3, primals_5, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2
class SentenceMatrixLayerNew(nn.Module):
def __init__(self, in_size, out_size=1, p_Asem=0.6):
super(SentenceMatrixLayerNew, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.p_Asem = p_Asem
self.linear = nn.Linear(in_size * 2, out_size)
def forward(self, input_0, input_1, input_2):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ayyyq/T-LSTM | SentenceMatrixLayer | false | 6,310 | ["MIT"] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_size, out_size=1, p_Asem=0.6):
super().__init__()
self.in_size = in_size
self.out_size = out_size
self.p_Asem = p_Asem
self.linear = nn.Linear(in_size * 2, out_size)
def forward(self, x, adj, mask):
seq_len = x.shape[1]
xi = x.unsqueeze(2).expand(-1, -1, seq_len, -1)
xj = x.unsqueeze(1).expand(-1, seq_len, -1, -1)
xij = torch.sigmoid(self.linear(torch.cat((xi, xj), dim=-1))).squeeze(
-1)
A_esm = self.p_Asem * xij + (1 - self.p_Asem) * adj
assert mask.shape[1] == seq_len, 'seq_len inconsistent'
mask_i = mask.unsqueeze(1).expand(-1, seq_len, -1)
mask_j = mask.unsqueeze(2).expand(-1, -1, seq_len)
A_mask = mask_i * mask_j
return A_esm.masked_fill(A_mask == 0, 1e-09)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4])
]
def get_init_inputs():
return [4]
|
SplitDim | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/mt/cmttqkny6sfnxpzdxvh5ov2uac5527dmp3f2ypze3tkvqwdngpc2.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# output => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_3, %view, %slice_5], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp16 = tl.load(in_ptr1 + (0))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = 20.0
tmp12 = tmp10 > tmp11
tmp13 = tl_math.exp(tmp10)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tl.where(tmp12, tmp10, tmp14)
tmp18 = tmp17 > tmp11
tmp19 = tl_math.exp(tmp17)
tmp20 = libdevice.log1p(tmp19)
tmp21 = tl.where(tmp18, tmp17, tmp20)
tmp22 = tmp15 + tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp9, tmp22, tmp23)
tmp25 = tmp0 >= tmp7
tmp26 = tl.full([1], 4, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tl.load(in_ptr0 + (2 + (4*x1) + ((-2) + x0)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.where(tmp9, tmp24, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_1
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SplitDim(nn.Module):
def __init__(self, nonlin_col=1, nonlin_type=torch.nn.functional.
softplus, correction=True):
super(SplitDim, self).__init__()
self.nonlinearity = nonlin_type
self.col = nonlin_col
if correction:
self.var = torch.nn.Parameter(torch.zeros(1))
else:
self.register_buffer('var', torch.ones(1, requires_grad=False) *
-15.0)
self.correction = correction
def forward(self, input):
transformed_output = self.nonlinearity(input[:, self.col])
transformed_output = transformed_output + self.nonlinearity(self.var)
stack_list = [input[:, :self.col], transformed_output.view(-1, 1)]
if self.col + 1 < input.size(1):
stack_list.append(input[:, self.col + 1:])
output = torch.cat(stack_list, 1)
return output
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
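# Minimal usage sketch (assumed): column `col` (default 1) goes through
# softplus and is shifted by softplus(var); all other columns pass through.
if __name__ == '__main__':
    layer = SplitDim()
    out = layer(torch.rand(4, 4))
    print(out.shape)  # torch.Size([4, 4])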
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
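# Single fused kernel for the whole forward: column 0 passes through,
# column 1 becomes softplus(x) + softplus(var) (the `> 20` branch matches
# F.softplus's overflow threshold), and columns 2-3 pass through.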
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp16 = tl.load(in_ptr1 + 0)
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = 20.0
tmp12 = tmp10 > tmp11
tmp13 = tl_math.exp(tmp10)
tmp14 = libdevice.log1p(tmp13)
tmp15 = tl.where(tmp12, tmp10, tmp14)
tmp18 = tmp17 > tmp11
tmp19 = tl_math.exp(tmp17)
tmp20 = libdevice.log1p(tmp19)
tmp21 = tl.where(tmp18, tmp17, tmp20)
tmp22 = tmp15 + tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp9, tmp22, tmp23)
tmp25 = tmp0 >= tmp7
tl.full([1], 4, tl.int64)
tmp28 = tl.load(in_ptr0 + (2 + 4 * x1 + (-2 + x0)), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp29 = tl.where(tmp9, tmp24, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_1, primals_2, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_1
return buf0, primals_2
class SplitDimNew(nn.Module):
def __init__(self, nonlin_col=1, nonlin_type=torch.nn.functional.
softplus, correction=True):
super(SplitDimNew, self).__init__()
self.nonlinearity = nonlin_type
self.col = nonlin_col
if correction:
self.var = torch.nn.Parameter(torch.zeros(1))
else:
self.register_buffer('var', torch.ones(1, requires_grad=False) *
-15.0)
self.correction = correction
def forward(self, input_0):
primals_2 = self.var
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| b4thesunrise/drbayes | SplitDim | false | 6,311 | ["BSD-2-Clause"] | 1 | 9bc827aea2c7f084fb1ee77a4bd9f3c9726ecf8c | https://github.com/b4thesunrise/drbayes/tree/9bc827aea2c7f084fb1ee77a4bd9f3c9726ecf8c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, nonlin_col=1, nonlin_type=torch.nn.functional.
softplus, correction=True):
super().__init__()
self.nonlinearity = nonlin_type
self.col = nonlin_col
if correction:
self.var = torch.nn.Parameter(torch.zeros(1))
else:
self.register_buffer('var', torch.ones(1, requires_grad=False) *
-15.0)
self.correction = correction
def forward(self, input):
transformed_output = self.nonlinearity(input[:, self.col])
transformed_output = transformed_output + self.nonlinearity(self.var)
stack_list = [input[:, :self.col], transformed_output.view(-1, 1)]
if self.col + 1 < input.size(1):
stack_list.append(input[:, self.col + 1:])
output = torch.cat(stack_list, 1)
return output
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return []
|
ReSentenceMatrixLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qz/cqz2secr535r76gqduuhmyhjt4aplvu6fvum4xsh5e3ep5fhn6kv.py
# Topologically Sorted Source Nodes: [xij], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# xij => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %expand_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = (xindex // 32)
x1 = (xindex // 8) % 4
x3 = (xindex // 128)
x5 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x4) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*x1) + (16*x3) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x5), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l5/cl56wlxgreyyyywasnfkqi3kjivhhlsy756qrujacc7kbh3mqvx7.py
# Topologically Sorted Source Nodes: [sigmoid, mul, A_esm], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# A_esm => add
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%squeeze,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %primals_5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %mul), kwargs = {})
triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp5 = tmp3 * tmp4
tmp6 = tmp1 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 8), (8, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (), ())
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [xij], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), reinterpret_tensor(primals_2, (8, 1), (1, 8), 0), alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul, A_esm], Original ATen: [aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_1.run(buf2, primals_4, primals_5, buf3, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf3, primals_5, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ReSentenceMatrixLayer(nn.Module):
def __init__(self, in_size, out_size=1):
super(ReSentenceMatrixLayer, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.a_Asem = nn.Parameter(torch.tensor(0.0))
self.linear = nn.Linear(in_size * 2, out_size)
def forward(self, x, adj):
xi = x.unsqueeze(-2)
xi = xi.expand(xi.shape[0], xi.shape[1], xi.shape[1], xi.shape[-1])
xj = x.unsqueeze(1)
xj = xj.expand(xj.shape[0], xj.shape[2], xj.shape[2], xj.shape[-1])
xij = torch.cat((xi, xj), -1)
A_esm = torch.sigmoid(self.linear(xij).squeeze()
) + self.a_Asem * adj.to_dense()
return A_esm
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_size': 4}]
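# Hedged usage sketch (assumed): learned pairwise similarity plus a trainable
# scalar a_Asem (initialized to 0) times the dense adjacency.
if __name__ == '__main__':
    layer = ReSentenceMatrixLayer(in_size=4)
    A = layer(*get_inputs())
    print(A.shape)  # torch.Size([4, 4, 4, 4])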
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
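# Builds the pairwise feature tensor cat((xi, xj), -1): the first 4 lanes of
# each row come from the xi expansion, the last 4 from xj.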
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x4 = xindex // 32
x1 = xindex // 8 % 4
x3 = xindex // 128
x5 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x4 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * x1 + 16 * x3 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x5, tmp10, xmask)
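# Fuses sigmoid(linear(xij)) + a_Asem * adj; in_ptr1 holds the scalar
# a_Asem, broadcast across all 256 output elements.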
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp5 = tmp3 * tmp4
tmp6 = tmp1 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 8), (8, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (), ())
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, buf0, 512, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 8), (
8, 1), 0), reinterpret_tensor(primals_2, (8, 1), (1, 8), 0),
alpha=1, beta=1, out=buf2)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(256)](buf2, primals_4,
primals_5, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
return buf3, primals_5, reinterpret_tensor(buf0, (64, 8), (8, 1), 0), buf2
class ReSentenceMatrixLayerNew(nn.Module):
def __init__(self, in_size, out_size=1):
super(ReSentenceMatrixLayerNew, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.a_Asem = nn.Parameter(torch.tensor(0.0))
self.linear = nn.Linear(in_size * 2, out_size)
def forward(self, input_0, input_1):
primals_4 = self.a_Asem
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ayyyq/T-LSTM | ReSentenceMatrixLayer | false | 6,312 | [
"MIT"
] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_size, out_size=1):
super().__init__()
self.in_size = in_size
self.out_size = out_size
self.a_Asem = nn.Parameter(torch.tensor(0.0))
self.linear = nn.Linear(in_size * 2, out_size)
def forward(self, x, adj):
xi = x.unsqueeze(-2)
xi = xi.expand(xi.shape[0], xi.shape[1], xi.shape[1], xi.shape[-1])
xj = x.unsqueeze(1)
xj = xj.expand(xj.shape[0], xj.shape[2], xj.shape[2], xj.shape[-1])
xij = torch.cat((xi, xj), -1)
A_esm = torch.sigmoid(self.linear(xij).squeeze()
) + self.a_Asem * adj.to_dense()
return A_esm
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
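A minimal eager-mode sketch of what the compiled path above computes: the cat kernel builds pairwise (xi, xj) features, addmm applies the linear layer, and triton_poi_fused_add_mul_sigmoid_1 fuses sigmoid(scores) + a_Asem * adj. Shapes follow get_inputs; reference_a_esm is a name introduced here purely for illustration.
import torch
import torch.nn as nn
def reference_a_esm(x, adj, linear, a_asem):
    # Pairwise features: xi repeats along dim -2, xj along dim 1, then concat.
    xi = x.unsqueeze(-2).expand(x.shape[0], x.shape[1], x.shape[1], x.shape[-1])
    xj = x.unsqueeze(1).expand(x.shape[0], x.shape[1], x.shape[1], x.shape[-1])
    xij = torch.cat((xi, xj), -1)
    # The fused tail: sigmoid of the per-pair scores plus the scaled adjacency.
    return torch.sigmoid(linear(xij).squeeze()) + a_asem * adj
out = reference_a_esm(torch.rand(4, 4, 4), torch.rand(4, 4, 4, 4),
                      nn.Linear(8, 1), torch.tensor(0.5))
print(out.shape)  # torch.Size([4, 4, 4, 4]) -- matches buf3 in call()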
Qnet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (2, 128), (128, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 8192, grid=grid(8192), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128), (128, 1), 0), reinterpret_tensor(primals_6, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(buf3, (64, 128), (128, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class Qnet(nn.Module):
def __init__(self):
super(Qnet, self).__init__()
self.fc1 = nn.Linear(4, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def sample_action(self, obs, epsilon):
out = self.forward(obs)
coin = random.random()
if coin < epsilon:
return random.randint(0, 1)
else:
return out.argmax().item()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import random
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (128, 128), (128, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (2, 128), (128, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf6, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 128), (128, 1), 0),
reinterpret_tensor(primals_4, (128, 128), (1, 128), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf3,
primals_5, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_6, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), reinterpret_tensor(buf3, (64, 128), (128, 1), 0
), primals_6, buf5, primals_4, buf6
class QnetNew(nn.Module):
def __init__(self):
super(QnetNew, self).__init__()
self.fc1 = nn.Linear(4, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 2)
def sample_action(self, obs, epsilon):
out = self.forward(obs)
coin = random.random()
if coin < epsilon:
return random.randint(0, 1)
else:
return out.argmax().item()
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| azeye/QuickstartRL | Qnet | false | 6,313 | [
"MIT"
] | 1 | ae1a9eb8bc0c5f52700fa0ac19ce5abcf3ccdefa | https://github.com/azeye/QuickstartRL/tree/ae1a9eb8bc0c5f52700fa0ac19ce5abcf3ccdefa | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(4, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def sample_action(self, obs, epsilon):
out = self.forward(obs)
coin = random.random()
if coin < epsilon:
return random.randint(0, 1)
else:
return out.argmax().item()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
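A self-contained epsilon-greedy sketch mirroring Qnet.sample_action above; the Sequential stack and the free-standing sample_action here are illustrative stand-ins for the module, not the repo's API.
import random
import torch
import torch.nn as nn
net = nn.Sequential(nn.Linear(4, 128), nn.ReLU(),
                    nn.Linear(128, 128), nn.ReLU(),
                    nn.Linear(128, 2))  # same layer stack as Qnet
def sample_action(obs, epsilon):
    # Explore with probability epsilon, otherwise act greedily on Q-values.
    if random.random() < epsilon:
        return random.randint(0, 1)
    return net(obs).argmax().item()
obs = torch.rand(4)  # fc1 expects 4 input features
print(sample_action(obs, 1.0), sample_action(obs, 0.0))  # random vs greedy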
HardSwish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/4n/c4n4t4ob46tjofubvbx7kuypv4fkq77j7kjunchukdfb6wpttdx6.py
# Topologically Sorted Source Nodes: [add, relu6, inner, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# inner => div
# mul => mul
# relu6 => clamp_max, clamp_min
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %div), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, inner, mul], Original ATen: [aten.add, aten.hardtanh, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torchvision.transforms.functional as F  # unused: immediately shadowed by the import below
import torch.nn.functional as F
def hard_swish(x: 'torch.Tensor', inplace: 'bool'=False):
"""Hard swish."""
inner = F.relu6(x + 3.0).div_(6.0)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
"""Custom hardswish to work with onnx."""
def __init__(self, inplace: 'bool'=False):
"""Initialize."""
super().__init__()
self.inplace = inplace
def forward(self, x: 'torch.Tensor'):
"""Forward."""
return hard_swish(x, self.inplace)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torchvision.transforms.functional as F
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tmp9 = tmp0 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def hard_swish(x: 'torch.Tensor', inplace: 'bool'=False):
"""Hard swish."""
inner = F.relu6(x + 3.0).div_(6.0)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwishNew(nn.Module):
"""Custom hardswish to work with onnx."""
def __init__(self, inplace: 'bool'=False):
"""Initialize."""
super().__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| bcaitech1/p4-mod-model_diet | HardSwish | false | 6,314 | [
"MIT"
] | 1 | 36d8a747e12c375b07d132ed4d08f9fc77126a8b | https://github.com/bcaitech1/p4-mod-model_diet/tree/36d8a747e12c375b07d132ed4d08f9fc77126a8b | import torch
import torch.nn as nn
import torchvision.transforms.functional as F
import torch.nn.functional as F
def hard_swish(x: 'torch.Tensor', inplace: 'bool'=False):
"""Hard swish."""
inner = F.relu6(x + 3.0).div_(6.0)
return x.mul_(inner) if inplace else x.mul(inner)
class Model(nn.Module):
"""Custom hardswish to work with onnx."""
def __init__(self, inplace: 'bool'=False):
"""Initialize."""
super().__init__()
self.inplace = inplace
def forward(self, x: 'torch.Tensor'):
"""Forward."""
return hard_swish(x, self.inplace)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
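A quick numeric check of hard_swish(x) = x * relu6(x + 3) / 6 at a few sample points; relu6 clamps x + 3 to [0, 6], so the output is 0 below x = -3 and equals x above x = 3.
import torch
import torch.nn.functional as F
x = torch.tensor([-4.0, -1.0, 0.0, 1.0, 3.0])
y = x * F.relu6(x + 3.0) / 6.0
print(y)  # ~ tensor([-0.0000, -0.3333,  0.0000,  0.6667,  3.0000])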
PoswiseFeedForwardNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ra/craakqvyalqrlofntsbxdzl27qiaxg5dww5x34xbt3jypyc5o3px.py
# Topologically Sorted Source Nodes: [conv1d, output], Original ATen: [aten.convolution, aten.gelu]
# Source node to ATen node mapping:
# conv1d => convolution
# output => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_convolution_gelu_1 = async_compile.triton('triton_poi_fused_convolution_gelu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_gelu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_gelu_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lf/clf7hs52i4bd5d3e73uio27ntyjfqmszkbsw6dta3r6rzgeftva3.py
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_2, %primals_4, %primals_5, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d, output], Original ATen: [aten.convolution, aten.gelu]
triton_poi_fused_convolution_gelu_1.run(buf2, primals_3, buf3, 64, grid=grid(64), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
return (reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0), primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf2, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class PoswiseFeedForwardNet(nn.Module):
""" feed forward """
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(in_channels=self.config.d_hidn, out_channels
=self.config.d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=self.config.d_ff, out_channels=
self.config.d_hidn, kernel_size=1)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, inputs):
output = self.active(self.conv1(inputs.transpose(1, 2)))
output = self.conv2(output).transpose(1, 2)
output = self.dropout(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(d_hidn=4, d_ff=4, dropout=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_gelu_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = 0.7071067811865476
tmp6 = tmp2 * tmp5
tmp7 = libdevice.erf(tmp6)
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = tmp4 * tmp9
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
buf3 = buf0
del buf0
triton_poi_fused_convolution_gelu_1[grid(64)](buf2, primals_3, buf3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(64)](buf5, primals_5, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
return reinterpret_tensor(buf5, (4, 4, 4), (16, 1, 4), 0
), primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (
16, 1, 4), 0), buf2, buf3
class PoswiseFeedForwardNetNew(nn.Module):
""" feed forward """
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(in_channels=self.config.d_hidn, out_channels
=self.config.d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=self.config.d_ff, out_channels=
self.config.d_hidn, kernel_size=1)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| bage79/transformer-evolution-bage | PoswiseFeedForwardNet | false | 6,315 | [
"Apache-2.0"
] | 1 | 715bdf61421dc19e21fb0f66bfa4b564305987f8 | https://github.com/bage79/transformer-evolution-bage/tree/715bdf61421dc19e21fb0f66bfa4b564305987f8 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
""" feed forward """
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(in_channels=self.config.d_hidn, out_channels
=self.config.d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=self.config.d_ff, out_channels=
self.config.d_hidn, kernel_size=1)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, inputs):
output = self.active(self.conv1(inputs.transpose(1, 2)))
output = self.conv2(output).transpose(1, 2)
output = self.dropout(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
    return [_mock_config(d_hidn=4, d_ff=4, dropout=0.5)]
|
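Both convolutions above use kernel_size=1, so each is a per-position linear map over the feature dimension; a small sketch of that equivalence (the weight copy is illustrative):
import torch
import torch.nn as nn
conv = nn.Conv1d(4, 4, kernel_size=1)
lin = nn.Linear(4, 4)
with torch.no_grad():
    lin.weight.copy_(conv.weight.squeeze(-1))  # (out, in, 1) -> (out, in)
    lin.bias.copy_(conv.bias)
x = torch.rand(4, 4, 4)                      # (batch, seq, d_hidn)
a = conv(x.transpose(1, 2)).transpose(1, 2)  # the conv path used by the module
b = lin(x)                                   # equivalent per-position linear
print(torch.allclose(a, b, atol=1e-6))       # True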
CNNHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ad/cadvlssrbouaecvoc3nrmojykt35635tehcswqq5zpxprq6ce5yu.py
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv1d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [1], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 4), (8, 4, 1))
del buf0
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, 32, grid=grid(32), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 2), (8, 1, 4), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CNNHead(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.conv = nn.Conv1d(in_channels=input_dim, out_channels=2,
kernel_size=3, padding=1)
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(self.conv(x.transpose(1, 2))).transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (2, 4, 3), (12, 3, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(1,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 4), (8, 4, 1))
del buf0
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 2, 4), (8, 4, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(32)](buf2,
primals_3, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 2), (8, 1, 4), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0
), buf3
class CNNHeadNew(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.conv = nn.Conv1d(in_channels=input_dim, out_channels=2,
kernel_size=3, padding=1)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| baseballChatbot7/KBO_MRC | CNNHead | false | 6,316 | [
"MIT"
] | 1 | ad11318d785bacdf29a12adfd25afe90d7ff2779 | https://github.com/baseballChatbot7/KBO_MRC/tree/ad11318d785bacdf29a12adfd25afe90d7ff2779 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.conv = nn.Conv1d(in_channels=input_dim, out_channels=2,
kernel_size=3, padding=1)
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(self.conv(x.transpose(1, 2))).transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
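A shape sketch for the CNNHead pattern above: Conv1d consumes (batch, channels, seq), so the module transposes in and out, and padding=1 with kernel_size=3 preserves seq_len.
import torch
import torch.nn as nn
conv = nn.Conv1d(in_channels=4, out_channels=2, kernel_size=3, padding=1)
x = torch.rand(4, 4, 4)  # (batch, seq_len, input_dim)
y = torch.relu(conv(x.transpose(1, 2))).transpose(1, 2)
print(y.shape)  # torch.Size([4, 4, 2])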
IdentityMessage | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hx/chxwkpiemkkyykyxqzu6jzdqolo4hpw7hddgfyirazvjwohtttab.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1, %arg2_1, %arg3_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
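    # x0 in [0, 16) indexes the concatenated last dim; each 4-wide quarter selects one of the four inputs.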
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, arg2_1, arg3_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class IdentityMessage(torch.nn.Module):
def __init__(self, raw_msg_dim: 'int', memory_dim: 'int', time_dim: 'int'):
super(IdentityMessage, self).__init__()
self.out_channels = raw_msg_dim + 2 * memory_dim + time_dim
def forward(self, z_src, z_dst, raw_msg, t_enc):
return torch.cat([z_src, z_dst, raw_msg, t_enc], dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'raw_msg_dim': 4, 'memory_dim': 4, 'time_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
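    # 4-way select over the 16-wide output dim: each input tensor supplies one 4-wide slice of the cat result.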
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
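        # buf0 holds [z_src | z_dst | raw_msg | t_enc] concatenated along the last dim.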
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1024)](arg0_1, arg1_1, arg2_1, arg3_1,
buf0, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class IdentityMessageNew(torch.nn.Module):
def __init__(self, raw_msg_dim: 'int', memory_dim: 'int', time_dim: 'int'):
super(IdentityMessageNew, self).__init__()
self.out_channels = raw_msg_dim + 2 * memory_dim + time_dim
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| beneisner/pytorch_geometric | IdentityMessage | false | 6,317 | [
"MIT"
] | 1 | 53d44a96bd2de2753b1ab1d7153c026c92606a81 | https://github.com/beneisner/pytorch_geometric/tree/53d44a96bd2de2753b1ab1d7153c026c92606a81 | import torch
import torch.utils.data
class Model(torch.nn.Module):
def __init__(self, raw_msg_dim: 'int', memory_dim: 'int', time_dim: 'int'):
super().__init__()
self.out_channels = raw_msg_dim + 2 * memory_dim + time_dim
def forward(self, z_src, z_dst, raw_msg, t_enc):
return torch.cat([z_src, z_dst, raw_msg, t_enc], dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
SigmoidDiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gd/cgdtd7ki7lurypoeyfwjebdfquygdeupjef4ltfbbbdk5u7owcpl.py
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# intersection => mul
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view, [1]), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_1, [1]), kwargs = {})
triton_per_fused_mul_sum_0 = async_compile.triton('triton_per_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
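    # Sigmoid is fused into the reduction: per-sample sum(p*t), sum(p), sum(t) over the flattened 64 elements.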
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
tl.store(out_ptr2 + (x0), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vq/cvqiixp4wmb73ig2cla6idbqq7i6vd5n3qmdluadrv32f52pdgw3.py
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, truediv, sum_4, truediv_1, loss], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# loss => sub
# mul_1 => mul_1
# sum_4 => sum_4
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%div,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div_1), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_1 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp6 = tl.load(in_ptr2 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp15, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [intersection, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_mul_sum_0.run(arg0_1, arg1_1, buf0, buf1, buf2, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [add, mul_1, add_1, add_2, truediv, sum_4, truediv_1, loss], Original ATen: [aten.add, aten.mul, aten.div, aten.sum, aten.rsub]
triton_per_fused_add_div_mul_rsub_sum_1.run(buf4, buf0, buf1, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf0
del buf1
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, input, target):
n = input.shape[0]
input = input.view(n, -1)
target = target.view(n, -1)
intersection = input * target
loss = 1 - (2 * (intersection.sum(1) + self.smooth) / (input.sum(1) +
target.sum(1) + self.smooth)).sum() / n
return loss
class SigmoidDiceLoss(nn.Module):
def __init__(self, smooth=1.0):
super(SigmoidDiceLoss, self).__init__()
self.sigmoid = nn.Sigmoid()
self.dice = DiceLoss(smooth=smooth)
def forward(self, input, target):
input = self.sigmoid(input)
loss = self.dice(input, target)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl.sigmoid(tmp0)
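    # Sigmoid fused with three per-sample row reductions: sum(p*t), sum(p), sum(t).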
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp10 = tl.where(xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp14 = tl.where(xmask, tmp12, 0)
tmp15 = tl.sum(tmp14, 1)[:, None]
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
tl.store(out_ptr2 + x0, tmp15, xmask)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_1(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp6 = tl.load(in_ptr2 + r0, None)
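    # Dice: 1 - mean_n(2 * (inter + smooth) / (p_sum + t_sum + smooth)); smooth=1.0 and n=4 give the 0.25 factor.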
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp7 = tmp5 + tmp6
tmp8 = tmp7 + tmp1
tmp9 = tmp4 / tmp8
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = 0.25
tmp14 = tmp12 * tmp13
tmp15 = tmp1 - tmp14
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp15, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.float32)
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_add_div_mul_rsub_sum_1[grid(1)](buf4, buf0, buf1,
buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
del buf2
return buf4,
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0):
super(DiceLoss, self).__init__()
self.smooth = smooth
def forward(self, input, target):
n = input.shape[0]
input = input.view(n, -1)
target = target.view(n, -1)
intersection = input * target
loss = 1 - (2 * (intersection.sum(1) + self.smooth) / (input.sum(1) +
target.sum(1) + self.smooth)).sum() / n
return loss
class SigmoidDiceLossNew(nn.Module):
def __init__(self, smooth=1.0):
super(SigmoidDiceLossNew, self).__init__()
self.sigmoid = nn.Sigmoid()
self.dice = DiceLoss(smooth=smooth)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| azxj/BRRNet | SigmoidDiceLoss | false | 6,318 | [
"MIT"
] | 1 | 274068efd5453f2c1fb07bfaad448d048b9c793b | https://github.com/azxj/BRRNet/tree/274068efd5453f2c1fb07bfaad448d048b9c793b | import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth=1.0):
super().__init__()
self.smooth = smooth
def forward(self, input, target):
n = input.shape[0]
input = input.view(n, -1)
target = target.view(n, -1)
intersection = input * target
loss = 1 - (2 * (intersection.sum(1) + self.smooth) / (input.sum(1) +
target.sum(1) + self.smooth)).sum() / n
return loss
class Model(nn.Module):
def __init__(self, smooth=1.0):
super().__init__()
self.sigmoid = nn.Sigmoid()
self.dice = DiceLoss(smooth=smooth)
def forward(self, input, target):
input = self.sigmoid(input)
loss = self.dice(input, target)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
PolicyModuleAlt | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/tf/ctfobpckmiv3kkga3a6gzs6unuclcnxpb4xc2h5r3udgxgix4ip5.py
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# hidden => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [hidden], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [hidden_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [action], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (buf4, buf6, primals_1, buf1, buf3, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PolicyModuleAlt(nn.Module):
def __init__(self, input_dim, hid_dim, n_actions):
super().__init__()
self.fc_1 = nn.Linear(input_dim, hid_dim)
self.fc_2 = nn.Linear(hid_dim, hid_dim)
self.fc_a = nn.Linear(hid_dim, n_actions)
self.fc_v = nn.Linear(hid_dim, 1)
def forward(self, image):
batch_size, *_ = image.shape
image = image.reshape(batch_size, -1)
hidden = F.relu(self.fc_1(image))
hidden = F.relu(self.fc_2(hidden))
action = self.fc_a(hidden)
value = self.fc_v(hidden)
return action, value
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hid_dim': 4, 'n_actions': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
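    # In-place fused bias add + ReLU over the preceding matmul output.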
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
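        # fc_1: buf0 = x @ W1^T; the relu kernel below adds the bias and applies ReLU in place.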
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(16)](buf1, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(16)](buf3, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, buf3, reinterpret_tensor(primals_8,
(4, 1), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_9
return buf4, buf6, primals_1, buf1, buf3, primals_8, primals_6, primals_4
class PolicyModuleAltNew(nn.Module):
def __init__(self, input_dim, hid_dim, n_actions):
super().__init__()
self.fc_1 = nn.Linear(input_dim, hid_dim)
self.fc_2 = nn.Linear(hid_dim, hid_dim)
self.fc_a = nn.Linear(hid_dim, n_actions)
self.fc_v = nn.Linear(hid_dim, 1)
def forward(self, input_0):
        # Map module parameters to the positional slots call() asserts:
        # primals_1 is the input image; weights/biases follow in layer order.
        primals_2 = self.fc_1.weight
        primals_3 = self.fc_1.bias
        primals_4 = self.fc_2.weight
        primals_5 = self.fc_2.bias
        primals_6 = self.fc_a.weight
        primals_7 = self.fc_a.bias
        primals_8 = self.fc_v.weight
        primals_9 = self.fc_v.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| bentrevett/task-oriented-language-grounding | PolicyModuleAlt | false | 6,319 | [
"MIT"
] | 1 | 812a7bc21ee622030eb0594c576c7d60dc630148 | https://github.com/bentrevett/task-oriented-language-grounding/tree/812a7bc21ee622030eb0594c576c7d60dc630148 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_dim, hid_dim, n_actions):
super().__init__()
self.fc_1 = nn.Linear(input_dim, hid_dim)
self.fc_2 = nn.Linear(hid_dim, hid_dim)
self.fc_a = nn.Linear(hid_dim, n_actions)
self.fc_v = nn.Linear(hid_dim, 1)
def forward(self, image):
batch_size, *_ = image.shape
image = image.reshape(batch_size, -1)
hidden = F.relu(self.fc_1(image))
hidden = F.relu(self.fc_2(hidden))
action = self.fc_a(hidden)
value = self.fc_v(hidden)
return action, value
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
feedforwardLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7b/c7bsbu5nqjwno7oolhruidochm2rierdki7fkzahol2dvs7dgv5t.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gm/cgmflgdlpeeb52xctoa47uvw47ycyf7ahlj5wdscxdatpbwcboco.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/b5/cb5kfengtwtso2cktophcv2j56lrz4uz52si4koheb5liewfkvtt.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_2 => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_3), kwargs = {})
triton_poi_fused_add_3 = async_compile.triton('triton_poi_fused_add_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.add]
triton_poi_fused_add_3.run(buf6, primals_1, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0), reinterpret_tensor(buf4, (64, 4), (4, 1), 0), primals_6, buf7, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class feedforwardLayer(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.3):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.layer_norm(x)
x = self.w_2(self.dropout(F.relu(self.w_1(x))))
x = residual + self.dropout2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_hid': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
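    # LayerNorm statistics: per-row mean and rsqrt(var + 1e-6) over the last dim (size 4), loads unrolled.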
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
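    # Apply the normalization plus elementwise affine: (x - mean) * rsqrt * weight + bias.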
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
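    # Bias add + ReLU; also emits the (out <= 0) mask saved for the backward pass.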
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_3(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
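    # Residual add: out = residual + (matmul_out + bias); dropout appears as a no-op in this traced forward graph.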
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(256)](buf4,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf4, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_3[grid(256)](buf6, primals_1, primals_7, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
return buf6, primals_1, reinterpret_tensor(buf2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf4, (64, 4), (4, 1), 0
), primals_6, buf7, primals_4
class feedforwardLayerNew(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.3):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, input_0):
primals_4 = self.w_1.weight
primals_2 = self.w_1.bias
primals_6 = self.w_2.weight
primals_3 = self.w_2.bias
primals_5 = self.layer_norm.weight
primals_7 = self.layer_norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ayyyq/T-LSTM | feedforwardLayer | false | 6,320 | [
"MIT"
] | 1 | 36dbc88ac710d3925851cd87c2368ecfc7061b70 | https://github.com/ayyyq/T-LSTM/tree/36dbc88ac710d3925851cd87c2368ecfc7061b70 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.3):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid)
self.w_2 = nn.Linear(d_hid, d_in)
self.layer_norm = nn.LayerNorm(d_in, eps=1e-06)
self.dropout = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.layer_norm(x)
x = self.w_2(self.dropout(F.relu(self.w_1(x))))
x = residual + self.dropout2(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Envelope | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yy/cyymfekslkvatstvfrqhzuk22uwn2nab3s7atecdigv6km5ac32u.py
# Topologically Sorted Source Nodes: [truediv, x_pow_p0, mul_2, add, x_pow_p1, mul_3, add_1, x_pow_p2, mul_4, add_2], Original ATen: [aten.reciprocal, aten.mul, aten.pow, aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# mul_2 => mul_3
# mul_3 => mul_4
# mul_4 => mul_5
# truediv => mul_2, reciprocal
# x_pow_p0 => pow_1
# x_pow_p1 => mul
# x_pow_p2 => mul_1
# Graph fragment:
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%reciprocal, 1.0), kwargs = {})
# %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, -21.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %arg0_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 35), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_4), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %arg0_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, -15.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_5), kwargs = {})
triton_poi_fused_add_mul_pow_reciprocal_0 = async_compile.triton('triton_poi_fused_add_mul_pow_reciprocal_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_pow_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 * tmp0
tmp6 = tmp5 * tmp5
tmp7 = -21.0
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp10 = tmp6 * tmp0
tmp11 = 35.0
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tmp10 * tmp0
tmp15 = -15.0
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tl.store(out_ptr0 + (x0), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, x_pow_p0, mul_2, add, x_pow_p1, mul_3, add_1, x_pow_p2, mul_4, add_2], Original ATen: [aten.reciprocal, aten.mul, aten.pow, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_pow_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class Envelope(torch.nn.Module):
def __init__(self, exponent):
super(Envelope, self).__init__()
self.p = exponent + 1
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, x):
p, a, b, c = self.p, self.a, self.b, self.c
x_pow_p0 = x.pow(p - 1)
x_pow_p1 = x_pow_p0 * x
x_pow_p2 = x_pow_p1 * x
return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p2
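# Coefficient check (added note, not part of the original module): with
# exponent=4 we get p=5, a=-(6*7)/2=-21.0, b=5*7=35, c=-(5*6)/2=-15.0, so
# forward(x) = 1/x - 21*x**4 + 35*x**5 - 15*x**6, matching the constants
# hard-coded in the fused Triton kernel above.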
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'exponent': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_pow_reciprocal_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
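# Added note: evaluates the envelope polynomial elementwise for exponent=4,
# out = 1/x - 21*x**4 + 35*x**5 - 15*x**6.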
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 * tmp0
tmp6 = tmp5 * tmp5
tmp7 = -21.0
tmp8 = tmp6 * tmp7
tmp9 = tmp4 + tmp8
tmp10 = tmp6 * tmp0
tmp11 = 35.0
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp14 = tmp10 * tmp0
tmp15 = -15.0
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tl.store(out_ptr0 + x0, tmp17, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_pow_reciprocal_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class EnvelopeNew(torch.nn.Module):
def __init__(self, exponent):
super(EnvelopeNew, self).__init__()
self.p = exponent + 1
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| beneisner/pytorch_geometric | Envelope | false | 6,321 | [
"MIT"
] | 1 | 53d44a96bd2de2753b1ab1d7153c026c92606a81 | https://github.com/beneisner/pytorch_geometric/tree/53d44a96bd2de2753b1ab1d7153c026c92606a81 | import torch
import torch.utils.data
class Model(torch.nn.Module):
def __init__(self, exponent):
super().__init__()
self.p = exponent + 1
self.a = -(self.p + 1) * (self.p + 2) / 2
self.b = self.p * (self.p + 2)
self.c = -self.p * (self.p + 1) / 2
def forward(self, x):
p, a, b, c = self.p, self.a, self.b, self.c
x_pow_p0 = x.pow(p - 1)
x_pow_p1 = x_pow_p0 * x
x_pow_p2 = x_pow_p1 * x
return 1.0 / x + a * x_pow_p0 + b * x_pow_p1 + c * x_pow_p2
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
CR | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/la/clat6ut24lox6cbq4fgoo3cuozyhn4s3euktinoxgen532ugt6y5.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 25) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2,), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 400, grid=grid(400), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import List
from typing import Union
import torch.nn as nn
def autopad(kernel_size: 'Union[int, List[int]]', padding:
'Union[int, None]'=None) ->Union[int, List[int]]:
"""Auto padding calculation for pad='same' in TensorFlow."""
if isinstance(kernel_size, int):
kernel_size = [kernel_size]
return padding or [(x // 2) for x in kernel_size]
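# Illustrative note (not in the source): autopad(3) -> [1] and autopad(4) -> [2];
# an explicit truthy padding is returned unchanged, but padding=0 is falsy and
# therefore falls through to the computed 'same' padding.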
class Activation:
"""Convert string activation name to the activation class."""
def __init__(self, act_type: 'Union[str, None]') ->None:
"""Convert string activation name to the activation class.
Args:
act_type: Activation name.
Returns:
nn.Identity if {act_type} is None.
"""
self.type = act_type
self.args = [1] if self.type == 'Softmax' else []
def __call__(self) ->nn.Module:
if self.type is None:
return nn.Identity()
elif hasattr(nn, self.type):
return getattr(nn, self.type)(*self.args)
else:
return getattr(__import__('src.modules.activations', fromlist=[
'']), self.type)()
class CR(nn.Module):
"""Standard convolution with batch normalization and activation."""
def __init__(self, in_channel: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'=1, padding: 'Union[int, None]'=None, groups:
'int'=1, activation: 'Union[str, None]'='ReLU') ->None:
"""Standard convolution with batch normalization and activation.
Args:
in_channel: input channels.
out_channels: output channels.
kernel_size: kernel size.
stride: stride.
padding: input padding. If None is given, autopad is applied
which is identical to padding='SAME' in TensorFlow.
groups: group convolution.
activation: activation name. If None is given, nn.Identity is applied
which is no activation.
"""
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channels, kernel_size, stride,
padding=autopad(kernel_size, padding), groups=groups, bias=True)
self.act = Activation(activation)()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward."""
return self.act(self.conv(x))
def fusefoward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Fuse forward."""
return self.act(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from typing import List
from typing import Union
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
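# Added note: per-channel conv bias add (x1 indexes the 4 output channels over
# a 5x5 spatial map, xnumel = 4*4*5*5 = 400) + in-place ReLU; the (out <= 0)
# mask is stored for the backward pass.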
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2,), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(400)](buf1,
primals_2, buf2, 400, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
def autopad(kernel_size: 'Union[int, List[int]]', padding:
'Union[int, None]'=None) ->Union[int, List[int]]:
"""Auto padding calculation for pad='same' in TensorFlow."""
if isinstance(kernel_size, int):
kernel_size = [kernel_size]
return padding or [(x // 2) for x in kernel_size]
class Activation:
"""Convert string activation name to the activation class."""
def __init__(self, act_type: 'Union[str, None]') ->None:
"""Convert string activation name to the activation class.
Args:
act_type: Activation name.
Returns:
nn.Identity if {act_type} is None.
"""
self.type = act_type
self.args = [1] if self.type == 'Softmax' else []
def __call__(self) ->nn.Module:
if self.type is None:
return nn.Identity()
elif hasattr(nn, self.type):
return getattr(nn, self.type)(*self.args)
else:
return getattr(__import__('src.modules.activations', fromlist=[
'']), self.type)()
class CRNew(nn.Module):
"""Standard convolution with batch normalization and activation."""
def __init__(self, in_channel: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'=1, padding: 'Union[int, None]'=None, groups:
'int'=1, activation: 'Union[str, None]'='ReLU') ->None:
"""Standard convolution with batch normalization and activation.
Args:
in_channel: input channels.
out_channels: output channels.
kernel_size: kernel size.
stride: stride.
padding: input padding. If None is given, autopad is applied
which is identical to padding='SAME' in TensorFlow.
groups: group convolution.
activation: activation name. If None is given, nn.Identity is applied
which is no activation.
"""
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channels, kernel_size, stride,
padding=autopad(kernel_size, padding), groups=groups, bias=True)
self.act = Activation(activation)()
def fusefoward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Fuse forward."""
return self.act(self.conv(x))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bcaitech1/p4-mod-model_diet | CR | false | 6,322 | [
"MIT"
] | 1 | 36d8a747e12c375b07d132ed4d08f9fc77126a8b | https://github.com/bcaitech1/p4-mod-model_diet/tree/36d8a747e12c375b07d132ed4d08f9fc77126a8b | import torch
from typing import List
from typing import Union
import torch.nn as nn
def autopad(kernel_size: 'Union[int, List[int]]', padding:
'Union[int, None]'=None) ->Union[int, List[int]]:
"""Auto padding calculation for pad='same' in TensorFlow."""
if isinstance(kernel_size, int):
kernel_size = [kernel_size]
return padding or [(x // 2) for x in kernel_size]
class Activation:
"""Convert string activation name to the activation class."""
def __init__(self, act_type: 'Union[str, None]') ->None:
"""Convert string activation name to the activation class.
Args:
act_type: Activation name.
Returns:
nn.Identity if {act_type} is None.
"""
self.type = act_type
self.args = [1] if self.type == 'Softmax' else []
def __call__(self) ->nn.Module:
if self.type is None:
return nn.Identity()
elif hasattr(nn, self.type):
return getattr(nn, self.type)(*self.args)
else:
return getattr(__import__('src.modules.activations', fromlist=[
'']), self.type)()
class Model(nn.Module):
"""Standard convolution with batch normalization and activation."""
def __init__(self, in_channel: 'int', out_channels: 'int', kernel_size:
'int', stride: 'int'=1, padding: 'Union[int, None]'=None, groups:
'int'=1, activation: 'Union[str, None]'='ReLU') ->None:
"""Standard convolution with batch normalization and activation.
Args:
in_channel: input channels.
out_channels: output channels.
kernel_size: kernel size.
stride: stride.
padding: input padding. If None is given, autopad is applied
which is identical to padding='SAME' in TensorFlow.
groups: group convolution.
activation: activation name. If None is given, nn.Identity is applied
which is no activation.
"""
super().__init__()
self.conv = nn.Conv2d(in_channel, out_channels, kernel_size, stride,
padding=autopad(kernel_size, padding), groups=groups, bias=True)
self.act = Activation(activation)()
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Forward."""
return self.act(self.conv(x))
def fusefoward(self, x: 'torch.Tensor') ->torch.Tensor:
"""Fuse forward."""
return self.act(self.conv(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
bodypose_model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_1 => convolution
# input_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
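# Added note: conv bias add + in-place ReLU for the first VGG stage;
# xnumel = 4*64*64*64 = 1048576, with x1 selecting one of 64 output channels.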
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_5 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
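# Added note: 2x2, stride-2 max pooling that returns both the pooled value and
# the int8 argmax position (0-3) within each window; input width 64 -> output 32.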
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xq/cxqz2dr7nh2qabrtemj52pazmhrknj5ltcy32ka252ia6a3jgpqi.py
# Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_6 => convolution_2
# input_7 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pr/cpri5daxkfbmt5ostbhb5o2avircr64a2rmdkxfackaxyjfc7owe.py
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_10 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/of/cof37d5wbqzvtkioj7k4me7wqpvfv55rs62ytonj7gij2o3abnod.py
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_11 => convolution_4
# input_12 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mn/cmnzsv2cdbsuq2sygridqvwumzmcvknuthlumel5m25l2ajsr4ft.py
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_19 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ic/cicsjqc3cfcjzqlztx4hz7ssqwe47ngo3g2onc6463k3vgfmt5cw.py
# Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_20 => convolution_8
# input_21 => relu_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_8 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rs/crsb2j7t6kjc2dizrgavde3h3rerob3nhf7iqux6o24562lkvvoe.py
# Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_24 => convolution_10
# input_25 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_9, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qy/cqyis4pzdzl2zcpdenz7kfyw4uxhak4ugnkkhusp7xtxj4qytdez.py
# Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# input_26 => convolution_11
# input_27 => relu_11
# Graph fragment:
# %convolution_11 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_11 : [num_users=8] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_11,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xh/cxh5qnz7467zwx7kksukcyl5yqbimdzz2jusq6gmtz3v7ngsbddj.py
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out2 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_16, %convolution_21, %relu_11], 1), kwargs = {})
triton_poi_fused_cat_9 = async_compile.triton('triton_poi_fused_cat_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
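# Added note: channel-wise concatenation into 185 = 38 + 19 + 128 channels over
# an 8x8 map: first prediction branch (in_ptr0 + bias in_ptr1), second branch
# (in_ptr2 + bias in_ptr3), and the shared 128-channel features (in_ptr4). In
# the usual OpenPose layout the 38-channel branch holds part affinity fields
# and the 19-channel branch holds keypoint heatmaps.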
xnumel = 47360
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64) % 185
x0 = xindex % 64
x2 = (xindex // 11840)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 38, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (2432*x2)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 57, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (64*((-38) + x1)) + (1216*x2)), tmp13 & xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + ((-38) + x1), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 185, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tl.load(in_ptr4 + (x0 + (64*((-57) + x1)) + (8192*x2)), tmp19 & xmask, other=0.0)
tmp23 = tl.where(tmp13, tmp18, tmp22)
tmp24 = tl.where(tmp4, tmp9, tmp23)
tl.store(out_ptr0 + (x3), tmp24, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4y/c4y6uyvhmsek266ivpsqvnnoksgofgtz3h3rggm6nksziikdh57s.py
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_162 => convolution_84
# Graph fragment:
# %convolution_84 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_73, %primals_170, %primals_171, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_10 = async_compile.triton('triton_poi_fused_convolution_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 38
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
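
# The in-place kernel above, written out in eager mode: a plain per-channel
# bias add on the (4, 38, 8, 8) convolution output (the convolution itself is
# dispatched through extern_kernels.convolution with bias=None). The name and
# broadcast style below are illustrative only.
def _bias_add_reference(conv_out, bias):
    return conv_out + bias.view(1, -1, 1, 1)
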
# kernel path: runs/run_shard_4/inductor_cache/ce/ccekwplfoihswfouqfjqcwmfd2cg37pkjpmovikpxyaqzec4g3iq.py
# Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# input_175 => convolution_91
# input_176 => relu_80
# Graph fragment:
# %convolution_91 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_79, %primals_184, %primals_185, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_80 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_91,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_80, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 64) % 19
x2 = (xindex // 1216)
x3 = xindex % 1216
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x3 + (1280*x2)), tmp6, xmask)
''', device_str='cuda')
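
# Eager-mode sketch of the fused kernel above (illustrative name): bias add,
# ReLU written back in place, and the `y <= 0` mask that threshold_backward
# consumes during autograd. In the kernel the mask is written with a padded
# per-batch stride of 1280 elements, of which 1216 (= 19 * 64) are used.
def _relu_with_backward_mask_reference(conv_out, bias):
    import torch
    y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
    return y, y <= 0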

async_compile.wait(globals())
del async_compile


def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256, ), (1, ))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_29, (128, ), (1, ))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128, ), (1, ))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512, ), (1, ))
assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (38, ), (1, ))
assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128, ), (1, ))
assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (128, ), (1, ))
assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_43, (512, ), (1, ))
assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_45, (19, ), (1, ))
assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_47, (128, ), (1, ))
assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_49, (128, ), (1, ))
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_51, (128, ), (1, ))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128, ), (1, ))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128, ), (1, ))
assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_57, (128, ), (1, ))
assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_59, (38, ), (1, ))
assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_61, (128, ), (1, ))
assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_63, (128, ), (1, ))
assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_65, (128, ), (1, ))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128, ), (1, ))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128, ), (1, ))
assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_71, (128, ), (1, ))
assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_73, (19, ), (1, ))
assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_75, (128, ), (1, ))
assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_77, (128, ), (1, ))
assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_79, (128, ), (1, ))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128, ), (1, ))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128, ), (1, ))
assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_85, (128, ), (1, ))
assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_87, (38, ), (1, ))
assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_89, (128, ), (1, ))
assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_91, (128, ), (1, ))
assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_93, (128, ), (1, ))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128, ), (1, ))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128, ), (1, ))
assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_99, (128, ), (1, ))
assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_101, (19, ), (1, ))
assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_103, (128, ), (1, ))
assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_105, (128, ), (1, ))
assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_107, (128, ), (1, ))
assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_109, (128, ), (1, ))
assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_111, (128, ), (1, ))
assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_113, (128, ), (1, ))
assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_115, (38, ), (1, ))
assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_117, (128, ), (1, ))
assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_119, (128, ), (1, ))
assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_121, (128, ), (1, ))
assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_123, (128, ), (1, ))
assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_125, (128, ), (1, ))
assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_127, (128, ), (1, ))
assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_129, (19, ), (1, ))
assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_131, (128, ), (1, ))
assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_133, (128, ), (1, ))
assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_135, (128, ), (1, ))
assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_137, (128, ), (1, ))
assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_139, (128, ), (1, ))
assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_141, (128, ), (1, ))
assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_143, (38, ), (1, ))
assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_145, (128, ), (1, ))
assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_147, (128, ), (1, ))
assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_149, (128, ), (1, ))
assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_151, (128, ), (1, ))
assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_153, (128, ), (1, ))
assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_155, (128, ), (1, ))
assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_157, (19, ), (1, ))
assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_159, (128, ), (1, ))
assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_161, (128, ), (1, ))
assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_163, (128, ), (1, ))
assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_165, (128, ), (1, ))
assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_167, (128, ), (1, ))
assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_169, (128, ), (1, ))
assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_171, (38, ), (1, ))
assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_173, (128, ), (1, ))
assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_175, (128, ), (1, ))
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_177, (128, ), (1, ))
assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_179, (128, ), (1, ))
assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_181, (128, ), (1, ))
assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_183, (128, ), (1, ))
assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_185, (19, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
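        # High-level shape of the graph below, inferred from the tensor
        # shapes (the generated code does not name the model): a VGG-style
        # backbone of 3x3 conv+ReLU layers with three 2x2 max-pools
        # (64x64 -> 8x8, ending in 128 feature channels), two stage-1 heads
        # producing 38- and 19-channel maps, and repeated refinement stages
        # that each consume cat([38, 19, 128]) = 185 channels. The 38/19
        # split is consistent with an OpenPose-style PAF/heatmap network.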
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [input_1, input_2], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [input_3, input_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 262144, grid=grid(262144), stream=stream0)
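        # First 2x2 max-pool: 64x64 -> 32x32; buf5 keeps the int8 argmax
        # indices for the backward pass.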
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [input_6, input_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 524288, grid=grid(524288), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [input_8, input_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 524288, grid=grid(524288), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 131072, grid=grid(131072), stream=stream0)
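        # Second 2x2 max-pool: 32x32 -> 16x16.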
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [input_11, input_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [input_13, input_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 262144, grid=grid(262144), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [input_15, input_16], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 262144, grid=grid(262144), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [input_17, input_18], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf19, primals_17, 262144, grid=grid(262144), stream=stream0)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf19, buf20, buf21, 65536, grid=grid(65536), stream=stream0)
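        # Third 2x2 max-pool: 16x16 -> 8x8. Every later activation in this
        # graph stays at 8x8.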
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [input_20, input_21], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 131072, grid=grid(131072), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [input_22], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [input_22, input_23], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 131072, grid=grid(131072), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [input_24], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [input_24, input_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf27, primals_23, 65536, grid=grid(65536), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [input_26], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [input_26, input_27], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf29, primals_25, 32768, grid=grid(32768), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [input_28], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [input_28, input_29], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf31, primals_27, 32768, grid=grid(32768), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [input_30], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [input_30, input_31], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf33, primals_29, 32768, grid=grid(32768), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [input_32], Original ATen: [aten.convolution]
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34; del buf34 # reuse
# Topologically Sorted Source Nodes: [input_32, input_33], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf35, primals_31, 32768, grid=grid(32768), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [input_34], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [input_34, input_35], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf37, primals_33, 131072, grid=grid(131072), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [input_36], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
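        # Stage-1 first head: a 1x1 conv to 38 channels, run with bias=None;
        # primals_35 is added later inside triton_poi_fused_cat_9.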
# Topologically Sorted Source Nodes: [input_37], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
buf40 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [input_37, input_38], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf40, primals_37, 32768, grid=grid(32768), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [input_39], Original ATen: [aten.convolution]
buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
buf42 = buf41; del buf41 # reuse
# Topologically Sorted Source Nodes: [input_39, input_40], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf42, primals_39, 32768, grid=grid(32768), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [input_41], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = buf43; del buf43 # reuse
# Topologically Sorted Source Nodes: [input_41, input_42], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf44, primals_41, 32768, grid=grid(32768), stream=stream0)
del primals_41
# Topologically Sorted Source Nodes: [input_43], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
buf46 = buf45; del buf45 # reuse
# Topologically Sorted Source Nodes: [input_43, input_44], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf46, primals_43, 131072, grid=grid(131072), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [input_45], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
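        # Stage-1 second head: a 1x1 conv to 19 channels, again bias-free
        # here; primals_45 is folded into the cat kernel below.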
buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf38, primals_35, buf47, primals_45, buf29, buf48, 47360, grid=grid(47360), stream=stream0)
del buf38
del buf47
del primals_35
del primals_45
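        # buf48 is the 185-channel stage input: cat of the 38- and 19-channel
        # head outputs with the 128-channel backbone features buf29. Stage 2
        # follows: per branch, five 7x7 convs (padding 3), a 1x1 conv, and a
        # 1x1 projection back to 38 or 19 channels.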
# Topologically Sorted Source Nodes: [input_46], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [input_46, input_47], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf50, primals_47, 32768, grid=grid(32768), stream=stream0)
del primals_47
# Topologically Sorted Source Nodes: [input_48], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
buf52 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [input_48, input_49], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf52, primals_49, 32768, grid=grid(32768), stream=stream0)
del primals_49
# Topologically Sorted Source Nodes: [input_50], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
buf54 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [input_50, input_51], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf54, primals_51, 32768, grid=grid(32768), stream=stream0)
del primals_51
# Topologically Sorted Source Nodes: [input_52], Original ATen: [aten.convolution]
buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
buf56 = buf55; del buf55 # reuse
# Topologically Sorted Source Nodes: [input_52, input_53], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf56, primals_53, 32768, grid=grid(32768), stream=stream0)
del primals_53
# Topologically Sorted Source Nodes: [input_54], Original ATen: [aten.convolution]
buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
buf58 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [input_54, input_55], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf58, primals_55, 32768, grid=grid(32768), stream=stream0)
del primals_55
# Topologically Sorted Source Nodes: [input_56], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
buf60 = buf59; del buf59 # reuse
# Topologically Sorted Source Nodes: [input_56, input_57], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf60, primals_57, 32768, grid=grid(32768), stream=stream0)
del primals_57
# Topologically Sorted Source Nodes: [input_58], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_59], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62; del buf62 # reuse
# Topologically Sorted Source Nodes: [input_59, input_60], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf63, primals_61, 32768, grid=grid(32768), stream=stream0)
del primals_61
# Topologically Sorted Source Nodes: [input_61], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [input_61, input_62], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf65, primals_63, 32768, grid=grid(32768), stream=stream0)
del primals_63
# Topologically Sorted Source Nodes: [input_63], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
buf67 = buf66; del buf66 # reuse
# Topologically Sorted Source Nodes: [input_63, input_64], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf67, primals_65, 32768, grid=grid(32768), stream=stream0)
del primals_65
# Topologically Sorted Source Nodes: [input_65], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [input_65, input_66], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf69, primals_67, 32768, grid=grid(32768), stream=stream0)
del primals_67
# Topologically Sorted Source Nodes: [input_67], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70; del buf70 # reuse
# Topologically Sorted Source Nodes: [input_67, input_68], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf71, primals_69, 32768, grid=grid(32768), stream=stream0)
del primals_69
# Topologically Sorted Source Nodes: [input_69], Original ATen: [aten.convolution]
buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72; del buf72 # reuse
# Topologically Sorted Source Nodes: [input_69, input_70], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf73, primals_71, 32768, grid=grid(32768), stream=stream0)
del primals_71
# Topologically Sorted Source Nodes: [input_71], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out3], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf61, primals_59, buf74, primals_73, buf29, buf75, 47360, grid=grid(47360), stream=stream0)
del buf61
del buf74
del primals_59
del primals_73
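        # The later stages repeat this pattern with fresh weights
        # (primals_74 onward): cat with buf29, five 7x7 convs, then two 1x1
        # convs per branch; only the buffer names differ.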
# Topologically Sorted Source Nodes: [input_72], Original ATen: [aten.convolution]
buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76; del buf76 # reuse
# Topologically Sorted Source Nodes: [input_72, input_73], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf77, primals_75, 32768, grid=grid(32768), stream=stream0)
del primals_75
# Topologically Sorted Source Nodes: [input_74], Original ATen: [aten.convolution]
buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [input_74, input_75], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf79, primals_77, 32768, grid=grid(32768), stream=stream0)
del primals_77
# Topologically Sorted Source Nodes: [input_76], Original ATen: [aten.convolution]
buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
buf81 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [input_76, input_77], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf81, primals_79, 32768, grid=grid(32768), stream=stream0)
del primals_79
# Topologically Sorted Source Nodes: [input_78], Original ATen: [aten.convolution]
buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [input_78, input_79], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf83, primals_81, 32768, grid=grid(32768), stream=stream0)
del primals_81
# Topologically Sorted Source Nodes: [input_80], Original ATen: [aten.convolution]
buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84; del buf84 # reuse
# Topologically Sorted Source Nodes: [input_80, input_81], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf85, primals_83, 32768, grid=grid(32768), stream=stream0)
del primals_83
# Topologically Sorted Source Nodes: [input_82], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [input_82, input_83], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf87, primals_85, 32768, grid=grid(32768), stream=stream0)
del primals_85
# Topologically Sorted Source Nodes: [input_84], Original ATen: [aten.convolution]
buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_85], Original ATen: [aten.convolution]
buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
buf90 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [input_85, input_86], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf90, primals_89, 32768, grid=grid(32768), stream=stream0)
del primals_89
# Topologically Sorted Source Nodes: [input_87], Original ATen: [aten.convolution]
buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
buf92 = buf91; del buf91 # reuse
# Topologically Sorted Source Nodes: [input_87, input_88], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf92, primals_91, 32768, grid=grid(32768), stream=stream0)
del primals_91
# Topologically Sorted Source Nodes: [input_89], Original ATen: [aten.convolution]
buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
buf94 = buf93; del buf93 # reuse
# Topologically Sorted Source Nodes: [input_89, input_90], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf94, primals_93, 32768, grid=grid(32768), stream=stream0)
del primals_93
# Topologically Sorted Source Nodes: [input_91], Original ATen: [aten.convolution]
buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
buf96 = buf95; del buf95 # reuse
# Topologically Sorted Source Nodes: [input_91, input_92], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf96, primals_95, 32768, grid=grid(32768), stream=stream0)
del primals_95
# Topologically Sorted Source Nodes: [input_93], Original ATen: [aten.convolution]
buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
buf98 = buf97; del buf97 # reuse
# Topologically Sorted Source Nodes: [input_93, input_94], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf98, primals_97, 32768, grid=grid(32768), stream=stream0)
del primals_97
# Topologically Sorted Source Nodes: [input_95], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
buf100 = buf99; del buf99 # reuse
# Topologically Sorted Source Nodes: [input_95, input_96], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf100, primals_99, 32768, grid=grid(32768), stream=stream0)
del primals_99
# Topologically Sorted Source Nodes: [input_97], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out4], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf88, primals_87, buf101, primals_101, buf29, buf102, 47360, grid=grid(47360), stream=stream0)
del buf101
del buf88
del primals_101
del primals_87
# Topologically Sorted Source Nodes: [input_98], Original ATen: [aten.convolution]
buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
buf104 = buf103; del buf103 # reuse
# Topologically Sorted Source Nodes: [input_98, input_99], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf104, primals_103, 32768, grid=grid(32768), stream=stream0)
del primals_103
# Topologically Sorted Source Nodes: [input_100], Original ATen: [aten.convolution]
buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
buf106 = buf105; del buf105 # reuse
# Topologically Sorted Source Nodes: [input_100, input_101], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf106, primals_105, 32768, grid=grid(32768), stream=stream0)
del primals_105
# Topologically Sorted Source Nodes: [input_102], Original ATen: [aten.convolution]
buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
buf108 = buf107; del buf107 # reuse
# Topologically Sorted Source Nodes: [input_102, input_103], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf108, primals_107, 32768, grid=grid(32768), stream=stream0)
del primals_107
# Topologically Sorted Source Nodes: [input_104], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
buf110 = buf109; del buf109 # reuse
# Topologically Sorted Source Nodes: [input_104, input_105], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf110, primals_109, 32768, grid=grid(32768), stream=stream0)
del primals_109
# Topologically Sorted Source Nodes: [input_106], Original ATen: [aten.convolution]
buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
buf112 = buf111; del buf111 # reuse
# Topologically Sorted Source Nodes: [input_106, input_107], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf112, primals_111, 32768, grid=grid(32768), stream=stream0)
del primals_111
# Topologically Sorted Source Nodes: [input_108], Original ATen: [aten.convolution]
buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
buf114 = buf113; del buf113 # reuse
# Topologically Sorted Source Nodes: [input_108, input_109], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf114, primals_113, 32768, grid=grid(32768), stream=stream0)
del primals_113
# Topologically Sorted Source Nodes: [input_110], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_111], Original ATen: [aten.convolution]
buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
buf117 = buf116; del buf116 # reuse
# Topologically Sorted Source Nodes: [input_111, input_112], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf117, primals_117, 32768, grid=grid(32768), stream=stream0)
del primals_117
# Topologically Sorted Source Nodes: [input_113], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
buf119 = buf118; del buf118 # reuse
# Topologically Sorted Source Nodes: [input_113, input_114], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf119, primals_119, 32768, grid=grid(32768), stream=stream0)
del primals_119
# Topologically Sorted Source Nodes: [input_115], Original ATen: [aten.convolution]
buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
buf121 = buf120; del buf120 # reuse
# Topologically Sorted Source Nodes: [input_115, input_116], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf121, primals_121, 32768, grid=grid(32768), stream=stream0)
del primals_121
# Topologically Sorted Source Nodes: [input_117], Original ATen: [aten.convolution]
buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
buf123 = buf122; del buf122 # reuse
# Topologically Sorted Source Nodes: [input_117, input_118], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf123, primals_123, 32768, grid=grid(32768), stream=stream0)
del primals_123
# Topologically Sorted Source Nodes: [input_119], Original ATen: [aten.convolution]
buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = buf124; del buf124 # reuse
# Topologically Sorted Source Nodes: [input_119, input_120], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf125, primals_125, 32768, grid=grid(32768), stream=stream0)
del primals_125
# Topologically Sorted Source Nodes: [input_121], Original ATen: [aten.convolution]
buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126; del buf126 # reuse
# Topologically Sorted Source Nodes: [input_121, input_122], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf127, primals_127, 32768, grid=grid(32768), stream=stream0)
del primals_127
# Topologically Sorted Source Nodes: [input_123], Original ATen: [aten.convolution]
buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out5], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf115, primals_115, buf128, primals_129, buf29, buf129, 47360, grid=grid(47360), stream=stream0)
del buf115
del buf128
del primals_115
del primals_129
# Topologically Sorted Source Nodes: [input_124], Original ATen: [aten.convolution]
buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
buf131 = buf130; del buf130 # reuse
# Topologically Sorted Source Nodes: [input_124, input_125], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf131, primals_131, 32768, grid=grid(32768), stream=stream0)
del primals_131
# Topologically Sorted Source Nodes: [input_126], Original ATen: [aten.convolution]
buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
buf133 = buf132; del buf132 # reuse
# Topologically Sorted Source Nodes: [input_126, input_127], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf133, primals_133, 32768, grid=grid(32768), stream=stream0)
del primals_133
# Topologically Sorted Source Nodes: [input_128], Original ATen: [aten.convolution]
buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
buf135 = buf134; del buf134 # reuse
# Topologically Sorted Source Nodes: [input_128, input_129], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf135, primals_135, 32768, grid=grid(32768), stream=stream0)
del primals_135
# Topologically Sorted Source Nodes: [input_130], Original ATen: [aten.convolution]
buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
buf137 = buf136; del buf136 # reuse
# Topologically Sorted Source Nodes: [input_130, input_131], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf137, primals_137, 32768, grid=grid(32768), stream=stream0)
del primals_137
# Topologically Sorted Source Nodes: [input_132], Original ATen: [aten.convolution]
buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
buf139 = buf138; del buf138 # reuse
# Topologically Sorted Source Nodes: [input_132, input_133], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf139, primals_139, 32768, grid=grid(32768), stream=stream0)
del primals_139
# Topologically Sorted Source Nodes: [input_134], Original ATen: [aten.convolution]
buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140; del buf140 # reuse
# Topologically Sorted Source Nodes: [input_134, input_135], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf141, primals_141, 32768, grid=grid(32768), stream=stream0)
del primals_141
# Topologically Sorted Source Nodes: [input_136], Original ATen: [aten.convolution]
buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
# Topologically Sorted Source Nodes: [input_137], Original ATen: [aten.convolution]
buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
buf144 = buf143; del buf143 # reuse
# Topologically Sorted Source Nodes: [input_137, input_138], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf144, primals_145, 32768, grid=grid(32768), stream=stream0)
del primals_145
# Topologically Sorted Source Nodes: [input_139], Original ATen: [aten.convolution]
buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
buf146 = buf145; del buf145 # reuse
# Topologically Sorted Source Nodes: [input_139, input_140], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf146, primals_147, 32768, grid=grid(32768), stream=stream0)
del primals_147
# Topologically Sorted Source Nodes: [input_141], Original ATen: [aten.convolution]
buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
buf148 = buf147; del buf147 # reuse
# Topologically Sorted Source Nodes: [input_141, input_142], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf148, primals_149, 32768, grid=grid(32768), stream=stream0)
del primals_149
# Topologically Sorted Source Nodes: [input_143], Original ATen: [aten.convolution]
buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
buf150 = buf149; del buf149 # reuse
# Topologically Sorted Source Nodes: [input_143, input_144], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf150, primals_151, 32768, grid=grid(32768), stream=stream0)
del primals_151
# Topologically Sorted Source Nodes: [input_145], Original ATen: [aten.convolution]
buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
buf152 = buf151; del buf151 # reuse
# Topologically Sorted Source Nodes: [input_145, input_146], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf152, primals_153, 32768, grid=grid(32768), stream=stream0)
del primals_153
# Topologically Sorted Source Nodes: [input_147], Original ATen: [aten.convolution]
buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
buf154 = buf153; del buf153 # reuse
# Topologically Sorted Source Nodes: [input_147, input_148], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf154, primals_155, 32768, grid=grid(32768), stream=stream0)
del primals_155
# Topologically Sorted Source Nodes: [input_149], Original ATen: [aten.convolution]
buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [out6], Original ATen: [aten.cat]
triton_poi_fused_cat_9.run(buf142, primals_143, buf155, primals_157, buf29, buf156, 47360, grid=grid(47360), stream=stream0)
del buf142
del buf155
del primals_143
del primals_157
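        # Stage 6 repeats the stage 2-5 pattern: out6 = cat([38-ch L1 map,
        # 19-ch L2 map, 128-ch backbone features buf29], dim=1) -> 185
        # channels; each branch then runs five 7x7 convs, a 1x1 conv, and a
        # final 1x1 conv producing the 38- / 19-channel output maps.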
# Topologically Sorted Source Nodes: [input_150], Original ATen: [aten.convolution]
buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
buf158 = buf157; del buf157 # reuse
# Topologically Sorted Source Nodes: [input_150, input_151], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf158, primals_159, 32768, grid=grid(32768), stream=stream0)
del primals_159
# Topologically Sorted Source Nodes: [input_152], Original ATen: [aten.convolution]
buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
buf160 = buf159; del buf159 # reuse
# Topologically Sorted Source Nodes: [input_152, input_153], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf160, primals_161, 32768, grid=grid(32768), stream=stream0)
del primals_161
# Topologically Sorted Source Nodes: [input_154], Original ATen: [aten.convolution]
buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
buf162 = buf161; del buf161 # reuse
# Topologically Sorted Source Nodes: [input_154, input_155], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf162, primals_163, 32768, grid=grid(32768), stream=stream0)
del primals_163
# Topologically Sorted Source Nodes: [input_156], Original ATen: [aten.convolution]
buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
buf164 = buf163; del buf163 # reuse
# Topologically Sorted Source Nodes: [input_156, input_157], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf164, primals_165, 32768, grid=grid(32768), stream=stream0)
del primals_165
# Topologically Sorted Source Nodes: [input_158], Original ATen: [aten.convolution]
buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
buf166 = buf165; del buf165 # reuse
# Topologically Sorted Source Nodes: [input_158, input_159], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf166, primals_167, 32768, grid=grid(32768), stream=stream0)
del primals_167
# Topologically Sorted Source Nodes: [input_160], Original ATen: [aten.convolution]
buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
buf168 = buf167; del buf167 # reuse
# Topologically Sorted Source Nodes: [input_160, input_161], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf168, primals_169, 32768, grid=grid(32768), stream=stream0)
del primals_169
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
buf170 = buf169; del buf169 # reuse
# Topologically Sorted Source Nodes: [input_162], Original ATen: [aten.convolution]
triton_poi_fused_convolution_10.run(buf170, primals_171, 9728, grid=grid(9728), stream=stream0)
del primals_171
# Topologically Sorted Source Nodes: [input_163], Original ATen: [aten.convolution]
buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
buf172 = buf171; del buf171 # reuse
# Topologically Sorted Source Nodes: [input_163, input_164], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf172, primals_173, 32768, grid=grid(32768), stream=stream0)
del primals_173
# Topologically Sorted Source Nodes: [input_165], Original ATen: [aten.convolution]
buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
buf174 = buf173; del buf173 # reuse
# Topologically Sorted Source Nodes: [input_165, input_166], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf174, primals_175, 32768, grid=grid(32768), stream=stream0)
del primals_175
# Topologically Sorted Source Nodes: [input_167], Original ATen: [aten.convolution]
buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
buf176 = buf175; del buf175 # reuse
# Topologically Sorted Source Nodes: [input_167, input_168], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf176, primals_177, 32768, grid=grid(32768), stream=stream0)
del primals_177
# Topologically Sorted Source Nodes: [input_169], Original ATen: [aten.convolution]
buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
buf178 = buf177; del buf177 # reuse
# Topologically Sorted Source Nodes: [input_169, input_170], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf178, primals_179, 32768, grid=grid(32768), stream=stream0)
del primals_179
# Topologically Sorted Source Nodes: [input_171], Original ATen: [aten.convolution]
buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
buf180 = buf179; del buf179 # reuse
# Topologically Sorted Source Nodes: [input_171, input_172], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf180, primals_181, 32768, grid=grid(32768), stream=stream0)
del primals_181
# Topologically Sorted Source Nodes: [input_173], Original ATen: [aten.convolution]
buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
buf182 = buf181; del buf181 # reuse
# Topologically Sorted Source Nodes: [input_173, input_174], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf182, primals_183, 32768, grid=grid(32768), stream=stream0)
del primals_183
# Topologically Sorted Source Nodes: [input_175], Original ATen: [aten.convolution]
buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
buf184 = buf183; del buf183 # reuse
buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [input_175, input_176], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_11.run(buf184, primals_185, buf185, 4864, grid=grid(4864), stream=stream0)
del primals_185
return (buf170, buf184, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, primals_54, primals_56, primals_58, primals_60, primals_62, primals_64, primals_66, primals_68, primals_70, primals_72, primals_74, primals_76, primals_78, primals_80, primals_82, primals_84, primals_86, primals_88, primals_90, primals_92, primals_94, primals_96, primals_98, primals_100, primals_102, primals_104, primals_106, primals_108, primals_110, primals_112, primals_114, primals_116, primals_118, primals_120, primals_122, primals_124, primals_126, primals_128, primals_130, primals_132, primals_134, primals_136, primals_138, primals_140, primals_142, primals_144, primals_146, primals_148, primals_150, primals_152, primals_154, primals_156, primals_158, primals_160, primals_162, primals_164, primals_166, primals_168, primals_170, primals_172, primals_174, primals_176, primals_178, primals_180, primals_182, primals_184, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35, buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54, buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73, buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92, buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110, buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127, buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144, buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160, buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178, buf180, buf182, buf185, )
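# Of the tuple returned by call(), the first two tensors are the network
# outputs (buf170: the 38-channel stage-6 L1 map, buf184: the 19-channel
# stage-6 L2 map after its fused ReLU); the remaining entries are weights and
# intermediate activations inductor saves for the generated backward pass.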
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((38, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((512, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((19, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_155 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_156 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_157 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_158 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_159 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_160 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_161 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_162 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_163 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_164 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_165 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_166 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_167 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_168 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_169 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_170 = rand_strided((38, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_171 = rand_strided((38, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_172 = rand_strided((128, 185, 7, 7), (9065, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_173 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_174 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_175 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_176 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_177 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_178 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_179 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_180 = rand_strided((128, 128, 7, 7), (6272, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_181 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_182 = rand_strided((128, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_183 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_184 = rand_strided((19, 128, 1, 1), (128, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_185 = rand_strided((19, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170, primals_171, primals_172, primals_173, primals_174, primals_175, primals_176, primals_177, primals_178, primals_179, primals_180, primals_181, primals_182, primals_183, primals_184, primals_185])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
from collections import OrderedDict
import torch.nn as nn
def make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
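def _make_layers_demo():
    # A minimal sketch (hypothetical block names, not part of the model) of
    # how make_layers assembles a named Sequential: conv specs are
    # [in_ch, out_ch, kernel, stride, pad], pool specs are [kernel, stride,
    # pad], and any conv whose name is not in no_relu_layers gets an
    # in-place ReLU appended right after it.
    block = OrderedDict([
        ('conv_a', [3, 16, 3, 1, 1]),
        ('pool_a', [2, 2, 0]),  # 'pool' in the name selects MaxPool2d
        ('conv_b', [16, 16, 3, 1, 1]),
    ])
    # Resulting layer order: conv_a, relu_conv_a, pool_a, conv_b (no ReLU
    # after conv_b because it is listed in no_relu_layers).
    return make_layers(block, no_relu_layers=['conv_b'])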
class bodypose_model(nn.Module):
def __init__(self):
super(bodypose_model, self).__init__()
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
            'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
            'Mconv7_stage6_L1']
        # Note: the last entry duplicates 'Mconv7_stage6_L1' and
        # 'Mconv7_stage6_L2' is missing, so the final stage-6 L2 conv gets a
        # ReLU appended. The compiled graph matches that behavior (see the
        # fused ReLU in triton_poi_fused_convolution_relu_threshold_backward_11
        # on the 19-channel output), so the list is left as traced.
blocks = {}
block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [
64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64,
128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), (
'pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), (
'conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1
]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0
]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3,
1, 1]), ('conv4_3_CPM', [512, 256, 3, 1, 1]), ('conv4_4_CPM', [
256, 128, 3, 1, 1])])
block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), (
'conv5_2_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L1', [128,
128, 3, 1, 1]), ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), (
'conv5_5_CPM_L1', [512, 38, 1, 1, 0])])
block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), (
'conv5_2_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L2', [128,
128, 3, 1, 1]), ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), (
'conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = make_layers(block0, no_relu_layers)
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict([('Mconv1_stage%d_L1' % i,
[185, 128, 7, 1, 3]), ('Mconv2_stage%d_L1' % i, [128, 128,
7, 1, 3]), ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])])
blocks['block%d_2' % i] = OrderedDict([('Mconv1_stage%d_L2' % i,
[185, 128, 7, 1, 3]), ('Mconv2_stage%d_L2' % i, [128, 128,
7, 1, 3]), ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1, out1_2, out1], 1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1, out2_2, out1], 1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1, out3_2, out1], 1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1, out4_2, out1], 1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1, out5_2, out1], 1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1, out6_2
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
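def _bodypose_smoke_test():
    # A minimal shape check for bodypose_model (a sketch; runs on CPU with
    # the same random input that get_inputs() produces).
    model = bodypose_model().eval()
    x = get_inputs()[0]
    with torch.no_grad():
        pafs, heatmaps = model(x)
    # Three stride-2 pools reduce the 64x64 input to 8x8 feature maps.
    assert pafs.shape == (4, 38, 8, 8)      # L1 branch: part-affinity fields
    assert heatmaps.shape == (4, 19, 8, 8)  # L2 branch: keypoint heatmaps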
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from collections import OrderedDict
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
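def _bias_relu_reference(x, bias):
    # Pure-PyTorch reference for triton_poi_fused_convolution_relu_0 (a
    # sketch: the kernel hardcodes a contiguous (N, 64, 64, 64) layout via
    # `xindex // 4096 % 64` and skips bounds masking, so it assumes numel is
    # a multiple of XBLOCK). Example check on a clone, mirroring the launch
    # convention used in call() below:
    #   out = x.clone(); n = out.numel()
    #   triton_poi_fused_convolution_relu_0[grid(n)](out, bias, n, XBLOCK=1024)
    #   assert torch.allclose(out, _bias_relu_reference(x, bias))
    return torch.relu(x + bias.view(1, -1, 1, 1))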
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
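def _maxpool_reference(x):
    # Pure-PyTorch reference for the max_pool2d_with_indices kernels: a
    # 2x2/stride-2 max pool. The int8 tensor the kernel also writes encodes
    # which window position won (0=top-left, 1=top-right, 2=bottom-left,
    # 3=bottom-right) for the pooling backward pass. (A sketch: kernel 1
    # hardcodes a 64-pixel input row via the `2*x0 + 128*x1` addressing.)
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2)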
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
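# Kernels 0, 2, 4, 6, 7 and 8 are the same fused bias-add + ReLU epilogue
# specialized per call site: the hardcoded `xindex // spatial % channels`
# division (4096/64, 1024/128, 256/256, 64/512, 64/256, 64/128) recovers the
# channel index for the bias load in each contiguous NCHW layout.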
@triton.jit
def triton_poi_fused_cat_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 47360
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64 % 185
x0 = xindex % 64
x2 = xindex // 11840
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 38, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 2432 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 57, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (x0 + 64 * (-38 + x1) + 1216 * x2), tmp13 &
xmask, other=0.0)
tmp15 = tl.load(in_ptr3 + (-38 + x1), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tl.full([1], 185, tl.int64)
tmp22 = tl.load(in_ptr4 + (x0 + 64 * (-57 + x1) + 8192 * x2), tmp19 &
xmask, other=0.0)
tmp23 = tl.where(tmp13, tmp18, tmp22)
tmp24 = tl.where(tmp4, tmp9, tmp23)
tl.store(out_ptr0 + x3, tmp24, xmask)
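def _cat_reference(l1_map, l1_bias, l2_map, l2_bias, features):
    # Pure-PyTorch reference for triton_poi_fused_cat_9 (a sketch): it folds
    # the two final 1x1 convolutions' bias adds into the channel concat that
    # feeds the next stage (38 PAF + 19 heatmap + 128 backbone = 185 ch).
    return torch.cat([l1_map + l1_bias.view(1, -1, 1, 1),
                      l2_map + l2_bias.view(1, -1, 1, 1),
                      features], dim=1)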
@triton.jit
def triton_poi_fused_convolution_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 9728
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 38
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 64 % 19
x2 = xindex // 1216
x3 = xindex % 1216
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x3 + 1280 * x2), tmp6, xmask)
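def _relu_with_grad_mask_reference(x, bias):
    # Pure-PyTorch sketch of kernel 11 (kernel 10 above is the same epilogue
    # minus the ReLU): a fused bias-add + ReLU that also materializes the
    # boolean mask threshold_backward needs, i.e. where the upstream gradient
    # will be zeroed because the forward output was clamped to 0.
    out = torch.relu(x + bias.view(1, -1, 1, 1))
    grad_zero_mask = out <= 0.0
    return out, grad_zero_mask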
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170, primals_171, primals_172,
primals_173, primals_174, primals_175, primals_176, primals_177,
primals_178, primals_179, primals_180, primals_181, primals_182,
primals_183, primals_184, primals_185) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_29, (128,), (1,))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_33, (512,), (1,))
assert_size_stride(primals_34, (38, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_35, (38,), (1,))
assert_size_stride(primals_36, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (128,), (1,))
assert_size_stride(primals_42, (512, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_43, (512,), (1,))
assert_size_stride(primals_44, (19, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_45, (19,), (1,))
assert_size_stride(primals_46, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_49, (128,), (1,))
assert_size_stride(primals_50, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_53, (128,), (1,))
assert_size_stride(primals_54, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_55, (128,), (1,))
assert_size_stride(primals_56, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_57, (128,), (1,))
assert_size_stride(primals_58, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_59, (38,), (1,))
assert_size_stride(primals_60, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_61, (128,), (1,))
assert_size_stride(primals_62, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_63, (128,), (1,))
assert_size_stride(primals_64, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_65, (128,), (1,))
assert_size_stride(primals_66, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_67, (128,), (1,))
assert_size_stride(primals_68, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_69, (128,), (1,))
assert_size_stride(primals_70, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_71, (128,), (1,))
assert_size_stride(primals_72, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_73, (19,), (1,))
assert_size_stride(primals_74, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_75, (128,), (1,))
assert_size_stride(primals_76, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_77, (128,), (1,))
assert_size_stride(primals_78, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_79, (128,), (1,))
assert_size_stride(primals_80, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_81, (128,), (1,))
assert_size_stride(primals_82, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_83, (128,), (1,))
assert_size_stride(primals_84, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_85, (128,), (1,))
assert_size_stride(primals_86, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_87, (38,), (1,))
assert_size_stride(primals_88, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_89, (128,), (1,))
assert_size_stride(primals_90, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_91, (128,), (1,))
assert_size_stride(primals_92, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_93, (128,), (1,))
assert_size_stride(primals_94, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_95, (128,), (1,))
assert_size_stride(primals_96, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_97, (128,), (1,))
assert_size_stride(primals_98, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_99, (128,), (1,))
assert_size_stride(primals_100, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_101, (19,), (1,))
assert_size_stride(primals_102, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_103, (128,), (1,))
assert_size_stride(primals_104, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_105, (128,), (1,))
assert_size_stride(primals_106, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_107, (128,), (1,))
assert_size_stride(primals_108, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_109, (128,), (1,))
assert_size_stride(primals_110, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_111, (128,), (1,))
assert_size_stride(primals_112, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_113, (128,), (1,))
assert_size_stride(primals_114, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_115, (38,), (1,))
assert_size_stride(primals_116, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_117, (128,), (1,))
assert_size_stride(primals_118, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_119, (128,), (1,))
assert_size_stride(primals_120, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_121, (128,), (1,))
assert_size_stride(primals_122, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_123, (128,), (1,))
assert_size_stride(primals_124, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_125, (128,), (1,))
assert_size_stride(primals_126, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_127, (128,), (1,))
assert_size_stride(primals_128, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_129, (19,), (1,))
assert_size_stride(primals_130, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_131, (128,), (1,))
assert_size_stride(primals_132, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_133, (128,), (1,))
assert_size_stride(primals_134, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_135, (128,), (1,))
assert_size_stride(primals_136, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_137, (128,), (1,))
assert_size_stride(primals_138, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_139, (128,), (1,))
assert_size_stride(primals_140, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_141, (128,), (1,))
assert_size_stride(primals_142, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_143, (38,), (1,))
assert_size_stride(primals_144, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_145, (128,), (1,))
assert_size_stride(primals_146, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_147, (128,), (1,))
assert_size_stride(primals_148, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_149, (128,), (1,))
assert_size_stride(primals_150, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_151, (128,), (1,))
assert_size_stride(primals_152, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_153, (128,), (1,))
assert_size_stride(primals_154, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_155, (128,), (1,))
assert_size_stride(primals_156, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_157, (19,), (1,))
assert_size_stride(primals_158, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_159, (128,), (1,))
assert_size_stride(primals_160, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_161, (128,), (1,))
assert_size_stride(primals_162, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_163, (128,), (1,))
assert_size_stride(primals_164, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_165, (128,), (1,))
assert_size_stride(primals_166, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_167, (128,), (1,))
assert_size_stride(primals_168, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_169, (128,), (1,))
assert_size_stride(primals_170, (38, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_171, (38,), (1,))
assert_size_stride(primals_172, (128, 185, 7, 7), (9065, 49, 7, 1))
assert_size_stride(primals_173, (128,), (1,))
assert_size_stride(primals_174, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_175, (128,), (1,))
assert_size_stride(primals_176, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_177, (128,), (1,))
assert_size_stride(primals_178, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_179, (128,), (1,))
assert_size_stride(primals_180, (128, 128, 7, 7), (6272, 49, 7, 1))
assert_size_stride(primals_181, (128,), (1,))
assert_size_stride(primals_182, (128, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_183, (128,), (1,))
assert_size_stride(primals_184, (19, 128, 1, 1), (128, 1, 1, 1))
assert_size_stride(primals_185, (19,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf3, buf4,
buf5, 262144, XBLOCK=512, num_warps=8, num_stages=1)
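        # buf4 receives the 2x2 max-pooled activations; the int8 buf5 records
        # the winning-element indices that max_pool2d's backward kernel needs.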
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(524288)](buf7, primals_7,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 32, 32), (131072, 1024, 32, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(524288)](buf9, primals_9,
524288, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(131072)](buf9,
buf10, buf11, 131072, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 16, 16), (65536, 256, 16, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(262144)](buf13, primals_11,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 16, 16), (65536, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(262144)](buf15, primals_13,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 16, 16), (65536, 256, 16, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_4[grid(262144)](buf17, primals_15,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_15
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 256, 16, 16), (65536, 256, 16, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_4[grid(262144)](buf19, primals_17,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf20 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf21 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(65536)](buf19,
buf20, buf21, 65536, XBLOCK=256, num_warps=4, num_stages=1)
buf22 = extern_kernels.convolution(buf20, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 8, 8), (32768, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(131072)](buf23, primals_19,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 8, 8), (32768, 64, 8, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(131072)](buf25, primals_21,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf26 = extern_kernels.convolution(buf25, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 256, 8, 8), (16384, 64, 8, 1))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_7[grid(65536)](buf27, primals_23,
65536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_23
buf28 = extern_kernels.convolution(buf27, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 128, 8, 8), (8192, 64, 8, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_8[grid(32768)](buf29, primals_25,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf30 = extern_kernels.convolution(buf29, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 128, 8, 8), (8192, 64, 8, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_8[grid(32768)](buf31, primals_27,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_27
buf32 = extern_kernels.convolution(buf31, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 128, 8, 8), (8192, 64, 8, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_8[grid(32768)](buf33, primals_29,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_29
buf34 = extern_kernels.convolution(buf33, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf34, (4, 128, 8, 8), (8192, 64, 8, 1))
buf35 = buf34
del buf34
triton_poi_fused_convolution_relu_8[grid(32768)](buf35, primals_31,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_31
buf36 = extern_kernels.convolution(buf35, primals_32, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 512, 8, 8), (32768, 64, 8, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_6[grid(131072)](buf37, primals_33,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_33
buf38 = extern_kernels.convolution(buf37, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 38, 8, 8), (2432, 64, 8, 1))
buf39 = extern_kernels.convolution(buf29, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 8, 8), (8192, 64, 8, 1))
buf40 = buf39
del buf39
triton_poi_fused_convolution_relu_8[grid(32768)](buf40, primals_37,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_37
buf41 = extern_kernels.convolution(buf40, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf41, (4, 128, 8, 8), (8192, 64, 8, 1))
buf42 = buf41
del buf41
triton_poi_fused_convolution_relu_8[grid(32768)](buf42, primals_39,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_39
buf43 = extern_kernels.convolution(buf42, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = buf43
del buf43
triton_poi_fused_convolution_relu_8[grid(32768)](buf44, primals_41,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf45 = extern_kernels.convolution(buf44, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 512, 8, 8), (32768, 64, 8, 1))
buf46 = buf45
del buf45
triton_poi_fused_convolution_relu_6[grid(131072)](buf46, primals_43,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf47 = extern_kernels.convolution(buf46, primals_44, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 19, 8, 8), (1216, 64, 8, 1))
buf48 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(47360)](buf38, primals_35, buf47,
primals_45, buf29, buf48, 47360, XBLOCK=256, num_warps=4,
num_stages=1)
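        # This cat kernel fuses bias addition with concatenation: the
        # 38-channel L1 map, the 19-channel L2 map, and the 128-channel
        # backbone feature buf29 are stacked into the 185-channel input of the
        # next stage (38 + 19 + 128 = 185).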
del buf38
del buf47
del primals_35
del primals_45
buf49 = extern_kernels.convolution(buf48, primals_46, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 128, 8, 8), (8192, 64, 8, 1))
buf50 = buf49
del buf49
triton_poi_fused_convolution_relu_8[grid(32768)](buf50, primals_47,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_47
buf51 = extern_kernels.convolution(buf50, primals_48, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 128, 8, 8), (8192, 64, 8, 1))
buf52 = buf51
del buf51
triton_poi_fused_convolution_relu_8[grid(32768)](buf52, primals_49,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_49
buf53 = extern_kernels.convolution(buf52, primals_50, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 128, 8, 8), (8192, 64, 8, 1))
buf54 = buf53
del buf53
triton_poi_fused_convolution_relu_8[grid(32768)](buf54, primals_51,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_51
buf55 = extern_kernels.convolution(buf54, primals_52, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf55, (4, 128, 8, 8), (8192, 64, 8, 1))
buf56 = buf55
del buf55
triton_poi_fused_convolution_relu_8[grid(32768)](buf56, primals_53,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_53
buf57 = extern_kernels.convolution(buf56, primals_54, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf57, (4, 128, 8, 8), (8192, 64, 8, 1))
buf58 = buf57
del buf57
triton_poi_fused_convolution_relu_8[grid(32768)](buf58, primals_55,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_55
buf59 = extern_kernels.convolution(buf58, primals_56, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 128, 8, 8), (8192, 64, 8, 1))
buf60 = buf59
del buf59
triton_poi_fused_convolution_relu_8[grid(32768)](buf60, primals_57,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_57
buf61 = extern_kernels.convolution(buf60, primals_58, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 38, 8, 8), (2432, 64, 8, 1))
buf62 = extern_kernels.convolution(buf48, primals_60, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 128, 8, 8), (8192, 64, 8, 1))
buf63 = buf62
del buf62
triton_poi_fused_convolution_relu_8[grid(32768)](buf63, primals_61,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_61
buf64 = extern_kernels.convolution(buf63, primals_62, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 128, 8, 8), (8192, 64, 8, 1))
buf65 = buf64
del buf64
triton_poi_fused_convolution_relu_8[grid(32768)](buf65, primals_63,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_63
buf66 = extern_kernels.convolution(buf65, primals_64, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 128, 8, 8), (8192, 64, 8, 1))
buf67 = buf66
del buf66
triton_poi_fused_convolution_relu_8[grid(32768)](buf67, primals_65,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_65
buf68 = extern_kernels.convolution(buf67, primals_66, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 128, 8, 8), (8192, 64, 8, 1))
buf69 = buf68
del buf68
triton_poi_fused_convolution_relu_8[grid(32768)](buf69, primals_67,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_67
buf70 = extern_kernels.convolution(buf69, primals_68, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 128, 8, 8), (8192, 64, 8, 1))
buf71 = buf70
del buf70
triton_poi_fused_convolution_relu_8[grid(32768)](buf71, primals_69,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_69
buf72 = extern_kernels.convolution(buf71, primals_70, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf72, (4, 128, 8, 8), (8192, 64, 8, 1))
buf73 = buf72
del buf72
triton_poi_fused_convolution_relu_8[grid(32768)](buf73, primals_71,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_71
buf74 = extern_kernels.convolution(buf73, primals_72, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 19, 8, 8), (1216, 64, 8, 1))
buf75 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_9[grid(47360)](buf61, primals_59, buf74,
primals_73, buf29, buf75, 47360, XBLOCK=256, num_warps=4,
num_stages=1)
del buf61
del buf74
del primals_59
del primals_73
buf76 = extern_kernels.convolution(buf75, primals_74, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 128, 8, 8), (8192, 64, 8, 1))
buf77 = buf76
del buf76
triton_poi_fused_convolution_relu_8[grid(32768)](buf77, primals_75,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_75
buf78 = extern_kernels.convolution(buf77, primals_76, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf78, (4, 128, 8, 8), (8192, 64, 8, 1))
buf79 = buf78
del buf78
triton_poi_fused_convolution_relu_8[grid(32768)](buf79, primals_77,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_77
buf80 = extern_kernels.convolution(buf79, primals_78, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf80, (4, 128, 8, 8), (8192, 64, 8, 1))
buf81 = buf80
del buf80
triton_poi_fused_convolution_relu_8[grid(32768)](buf81, primals_79,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_79
buf82 = extern_kernels.convolution(buf81, primals_80, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf82, (4, 128, 8, 8), (8192, 64, 8, 1))
buf83 = buf82
del buf82
triton_poi_fused_convolution_relu_8[grid(32768)](buf83, primals_81,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_81
buf84 = extern_kernels.convolution(buf83, primals_82, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf84, (4, 128, 8, 8), (8192, 64, 8, 1))
buf85 = buf84
del buf84
triton_poi_fused_convolution_relu_8[grid(32768)](buf85, primals_83,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_83
buf86 = extern_kernels.convolution(buf85, primals_84, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 8, 8), (8192, 64, 8, 1))
buf87 = buf86
del buf86
triton_poi_fused_convolution_relu_8[grid(32768)](buf87, primals_85,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_85
buf88 = extern_kernels.convolution(buf87, primals_86, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf88, (4, 38, 8, 8), (2432, 64, 8, 1))
buf89 = extern_kernels.convolution(buf75, primals_88, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf89, (4, 128, 8, 8), (8192, 64, 8, 1))
buf90 = buf89
del buf89
triton_poi_fused_convolution_relu_8[grid(32768)](buf90, primals_89,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_89
buf91 = extern_kernels.convolution(buf90, primals_90, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 128, 8, 8), (8192, 64, 8, 1))
buf92 = buf91
del buf91
triton_poi_fused_convolution_relu_8[grid(32768)](buf92, primals_91,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_91
buf93 = extern_kernels.convolution(buf92, primals_92, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf93, (4, 128, 8, 8), (8192, 64, 8, 1))
buf94 = buf93
del buf93
triton_poi_fused_convolution_relu_8[grid(32768)](buf94, primals_93,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_93
buf95 = extern_kernels.convolution(buf94, primals_94, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf95, (4, 128, 8, 8), (8192, 64, 8, 1))
buf96 = buf95
del buf95
triton_poi_fused_convolution_relu_8[grid(32768)](buf96, primals_95,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_95
buf97 = extern_kernels.convolution(buf96, primals_96, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf97, (4, 128, 8, 8), (8192, 64, 8, 1))
buf98 = buf97
del buf97
triton_poi_fused_convolution_relu_8[grid(32768)](buf98, primals_97,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_97
buf99 = extern_kernels.convolution(buf98, primals_98, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 128, 8, 8), (8192, 64, 8, 1))
buf100 = buf99
del buf99
triton_poi_fused_convolution_relu_8[grid(32768)](buf100, primals_99,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_99
buf101 = extern_kernels.convolution(buf100, primals_100, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 19, 8, 8), (1216, 64, 8, 1))
buf102 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf88, primals_87, buf101,
primals_101, buf29, buf102, 47360, XBLOCK=256, num_warps=4,
num_stages=1)
del buf101
del buf88
del primals_101
del primals_87
buf103 = extern_kernels.convolution(buf102, primals_102, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 128, 8, 8), (8192, 64, 8, 1))
buf104 = buf103
del buf103
triton_poi_fused_convolution_relu_8[grid(32768)](buf104,
primals_103, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_103
buf105 = extern_kernels.convolution(buf104, primals_104, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf105, (4, 128, 8, 8), (8192, 64, 8, 1))
buf106 = buf105
del buf105
triton_poi_fused_convolution_relu_8[grid(32768)](buf106,
primals_105, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_105
buf107 = extern_kernels.convolution(buf106, primals_106, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf107, (4, 128, 8, 8), (8192, 64, 8, 1))
buf108 = buf107
del buf107
triton_poi_fused_convolution_relu_8[grid(32768)](buf108,
primals_107, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_107
buf109 = extern_kernels.convolution(buf108, primals_108, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 128, 8, 8), (8192, 64, 8, 1))
buf110 = buf109
del buf109
triton_poi_fused_convolution_relu_8[grid(32768)](buf110,
primals_109, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_109
buf111 = extern_kernels.convolution(buf110, primals_110, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf111, (4, 128, 8, 8), (8192, 64, 8, 1))
buf112 = buf111
del buf111
triton_poi_fused_convolution_relu_8[grid(32768)](buf112,
primals_111, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_111
buf113 = extern_kernels.convolution(buf112, primals_112, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf113, (4, 128, 8, 8), (8192, 64, 8, 1))
buf114 = buf113
del buf113
triton_poi_fused_convolution_relu_8[grid(32768)](buf114,
primals_113, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_113
buf115 = extern_kernels.convolution(buf114, primals_114, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 38, 8, 8), (2432, 64, 8, 1))
buf116 = extern_kernels.convolution(buf102, primals_116, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf116, (4, 128, 8, 8), (8192, 64, 8, 1))
buf117 = buf116
del buf116
triton_poi_fused_convolution_relu_8[grid(32768)](buf117,
primals_117, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_117
buf118 = extern_kernels.convolution(buf117, primals_118, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 128, 8, 8), (8192, 64, 8, 1))
buf119 = buf118
del buf118
triton_poi_fused_convolution_relu_8[grid(32768)](buf119,
primals_119, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_119
buf120 = extern_kernels.convolution(buf119, primals_120, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 128, 8, 8), (8192, 64, 8, 1))
buf121 = buf120
del buf120
triton_poi_fused_convolution_relu_8[grid(32768)](buf121,
primals_121, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_121
buf122 = extern_kernels.convolution(buf121, primals_122, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf122, (4, 128, 8, 8), (8192, 64, 8, 1))
buf123 = buf122
del buf122
triton_poi_fused_convolution_relu_8[grid(32768)](buf123,
primals_123, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_123
buf124 = extern_kernels.convolution(buf123, primals_124, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf124, (4, 128, 8, 8), (8192, 64, 8, 1))
buf125 = buf124
del buf124
triton_poi_fused_convolution_relu_8[grid(32768)](buf125,
primals_125, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_125
buf126 = extern_kernels.convolution(buf125, primals_126, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf126, (4, 128, 8, 8), (8192, 64, 8, 1))
buf127 = buf126
del buf126
triton_poi_fused_convolution_relu_8[grid(32768)](buf127,
primals_127, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_127
buf128 = extern_kernels.convolution(buf127, primals_128, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf128, (4, 19, 8, 8), (1216, 64, 8, 1))
buf129 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf115, primals_115, buf128,
primals_129, buf29, buf129, 47360, XBLOCK=256, num_warps=4,
num_stages=1)
del buf115
del buf128
del primals_115
del primals_129
buf130 = extern_kernels.convolution(buf129, primals_130, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf130, (4, 128, 8, 8), (8192, 64, 8, 1))
buf131 = buf130
del buf130
triton_poi_fused_convolution_relu_8[grid(32768)](buf131,
primals_131, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_131
buf132 = extern_kernels.convolution(buf131, primals_132, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf132, (4, 128, 8, 8), (8192, 64, 8, 1))
buf133 = buf132
del buf132
triton_poi_fused_convolution_relu_8[grid(32768)](buf133,
primals_133, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_133
buf134 = extern_kernels.convolution(buf133, primals_134, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf134, (4, 128, 8, 8), (8192, 64, 8, 1))
buf135 = buf134
del buf134
triton_poi_fused_convolution_relu_8[grid(32768)](buf135,
primals_135, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_135
buf136 = extern_kernels.convolution(buf135, primals_136, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf136, (4, 128, 8, 8), (8192, 64, 8, 1))
buf137 = buf136
del buf136
triton_poi_fused_convolution_relu_8[grid(32768)](buf137,
primals_137, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_137
buf138 = extern_kernels.convolution(buf137, primals_138, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf138, (4, 128, 8, 8), (8192, 64, 8, 1))
buf139 = buf138
del buf138
triton_poi_fused_convolution_relu_8[grid(32768)](buf139,
primals_139, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_139
buf140 = extern_kernels.convolution(buf139, primals_140, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 128, 8, 8), (8192, 64, 8, 1))
buf141 = buf140
del buf140
triton_poi_fused_convolution_relu_8[grid(32768)](buf141,
primals_141, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_141
buf142 = extern_kernels.convolution(buf141, primals_142, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 38, 8, 8), (2432, 64, 8, 1))
buf143 = extern_kernels.convolution(buf129, primals_144, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf143, (4, 128, 8, 8), (8192, 64, 8, 1))
buf144 = buf143
del buf143
triton_poi_fused_convolution_relu_8[grid(32768)](buf144,
primals_145, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_145
buf145 = extern_kernels.convolution(buf144, primals_146, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf145, (4, 128, 8, 8), (8192, 64, 8, 1))
buf146 = buf145
del buf145
triton_poi_fused_convolution_relu_8[grid(32768)](buf146,
primals_147, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_147
buf147 = extern_kernels.convolution(buf146, primals_148, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf147, (4, 128, 8, 8), (8192, 64, 8, 1))
buf148 = buf147
del buf147
triton_poi_fused_convolution_relu_8[grid(32768)](buf148,
primals_149, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_149
buf149 = extern_kernels.convolution(buf148, primals_150, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf149, (4, 128, 8, 8), (8192, 64, 8, 1))
buf150 = buf149
del buf149
triton_poi_fused_convolution_relu_8[grid(32768)](buf150,
primals_151, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_151
buf151 = extern_kernels.convolution(buf150, primals_152, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 128, 8, 8), (8192, 64, 8, 1))
buf152 = buf151
del buf151
triton_poi_fused_convolution_relu_8[grid(32768)](buf152,
primals_153, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_153
buf153 = extern_kernels.convolution(buf152, primals_154, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 128, 8, 8), (8192, 64, 8, 1))
buf154 = buf153
del buf153
triton_poi_fused_convolution_relu_8[grid(32768)](buf154,
primals_155, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_155
buf155 = extern_kernels.convolution(buf154, primals_156, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf155, (4, 19, 8, 8), (1216, 64, 8, 1))
buf156 = empty_strided_cuda((4, 185, 8, 8), (11840, 64, 8, 1),
torch.float32)
triton_poi_fused_cat_9[grid(47360)](buf142, primals_143, buf155,
primals_157, buf29, buf156, 47360, XBLOCK=256, num_warps=4,
num_stages=1)
del buf142
del buf155
del primals_143
del primals_157
buf157 = extern_kernels.convolution(buf156, primals_158, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf157, (4, 128, 8, 8), (8192, 64, 8, 1))
buf158 = buf157
del buf157
triton_poi_fused_convolution_relu_8[grid(32768)](buf158,
primals_159, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_159
buf159 = extern_kernels.convolution(buf158, primals_160, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf159, (4, 128, 8, 8), (8192, 64, 8, 1))
buf160 = buf159
del buf159
triton_poi_fused_convolution_relu_8[grid(32768)](buf160,
primals_161, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_161
buf161 = extern_kernels.convolution(buf160, primals_162, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf161, (4, 128, 8, 8), (8192, 64, 8, 1))
buf162 = buf161
del buf161
triton_poi_fused_convolution_relu_8[grid(32768)](buf162,
primals_163, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_163
buf163 = extern_kernels.convolution(buf162, primals_164, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 128, 8, 8), (8192, 64, 8, 1))
buf164 = buf163
del buf163
triton_poi_fused_convolution_relu_8[grid(32768)](buf164,
primals_165, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_165
buf165 = extern_kernels.convolution(buf164, primals_166, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf165, (4, 128, 8, 8), (8192, 64, 8, 1))
buf166 = buf165
del buf165
triton_poi_fused_convolution_relu_8[grid(32768)](buf166,
primals_167, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_167
buf167 = extern_kernels.convolution(buf166, primals_168, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf167, (4, 128, 8, 8), (8192, 64, 8, 1))
buf168 = buf167
del buf167
triton_poi_fused_convolution_relu_8[grid(32768)](buf168,
primals_169, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_169
buf169 = extern_kernels.convolution(buf168, primals_170, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 38, 8, 8), (2432, 64, 8, 1))
buf170 = buf169
del buf169
triton_poi_fused_convolution_10[grid(9728)](buf170, primals_171,
9728, XBLOCK=128, num_warps=4, num_stages=1)
del primals_171
buf171 = extern_kernels.convolution(buf156, primals_172, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf171, (4, 128, 8, 8), (8192, 64, 8, 1))
buf172 = buf171
del buf171
triton_poi_fused_convolution_relu_8[grid(32768)](buf172,
primals_173, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_173
buf173 = extern_kernels.convolution(buf172, primals_174, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf173, (4, 128, 8, 8), (8192, 64, 8, 1))
buf174 = buf173
del buf173
triton_poi_fused_convolution_relu_8[grid(32768)](buf174,
primals_175, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_175
buf175 = extern_kernels.convolution(buf174, primals_176, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 128, 8, 8), (8192, 64, 8, 1))
buf176 = buf175
del buf175
triton_poi_fused_convolution_relu_8[grid(32768)](buf176,
primals_177, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_177
buf177 = extern_kernels.convolution(buf176, primals_178, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf177, (4, 128, 8, 8), (8192, 64, 8, 1))
buf178 = buf177
del buf177
triton_poi_fused_convolution_relu_8[grid(32768)](buf178,
primals_179, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_179
buf179 = extern_kernels.convolution(buf178, primals_180, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf179, (4, 128, 8, 8), (8192, 64, 8, 1))
buf180 = buf179
del buf179
triton_poi_fused_convolution_relu_8[grid(32768)](buf180,
primals_181, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_181
buf181 = extern_kernels.convolution(buf180, primals_182, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf181, (4, 128, 8, 8), (8192, 64, 8, 1))
buf182 = buf181
del buf181
triton_poi_fused_convolution_relu_8[grid(32768)](buf182,
primals_183, 32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_183
buf183 = extern_kernels.convolution(buf182, primals_184, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf183, (4, 19, 8, 8), (1216, 64, 8, 1))
buf184 = buf183
del buf183
buf185 = empty_strided_cuda((4, 19, 8, 8), (1280, 64, 8, 1), torch.bool
)
triton_poi_fused_convolution_relu_threshold_backward_11[grid(4864)](
buf184, primals_185, buf185, 4864, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_185
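    # buf170 is the stage-6 L1 head output (4, 38, 8, 8) and buf184 the
    # stage-6 L2 head output (4, 19, 8, 8); the remaining tensors returned
    # below are kept alive for the backward pass.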
return (buf170, buf184, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, primals_30, primals_32, primals_34, primals_36,
primals_38, primals_40, primals_42, primals_44, primals_46,
primals_48, primals_50, primals_52, primals_54, primals_56,
primals_58, primals_60, primals_62, primals_64, primals_66,
primals_68, primals_70, primals_72, primals_74, primals_76,
primals_78, primals_80, primals_82, primals_84, primals_86,
primals_88, primals_90, primals_92, primals_94, primals_96,
primals_98, primals_100, primals_102, primals_104, primals_106,
primals_108, primals_110, primals_112, primals_114, primals_116,
primals_118, primals_120, primals_122, primals_124, primals_126,
primals_128, primals_130, primals_132, primals_134, primals_136,
primals_138, primals_140, primals_142, primals_144, primals_146,
primals_148, primals_150, primals_152, primals_154, primals_156,
primals_158, primals_160, primals_162, primals_164, primals_166,
primals_168, primals_170, primals_172, primals_174, primals_176,
primals_178, primals_180, primals_182, primals_184, buf1, buf3,
buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf19,
buf20, buf21, buf23, buf25, buf27, buf29, buf31, buf33, buf35,
buf37, buf40, buf42, buf44, buf46, buf48, buf50, buf52, buf54,
buf56, buf58, buf60, buf63, buf65, buf67, buf69, buf71, buf73,
buf75, buf77, buf79, buf81, buf83, buf85, buf87, buf90, buf92,
buf94, buf96, buf98, buf100, buf102, buf104, buf106, buf108, buf110,
buf112, buf114, buf117, buf119, buf121, buf123, buf125, buf127,
buf129, buf131, buf133, buf135, buf137, buf139, buf141, buf144,
buf146, buf148, buf150, buf152, buf154, buf156, buf158, buf160,
buf162, buf164, buf166, buf168, buf172, buf174, buf176, buf178,
buf180, buf182, buf185)
def make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
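# Example (hypothetical block spec, not from the source): conv entries are
# [in_channels, out_channels, kernel_size, stride, padding] and pool entries
# are [kernel_size, stride, padding], so
#     make_layers(OrderedDict([('conv1', [3, 16, 3, 1, 1]),
#                              ('pool1', [2, 2, 0])]), no_relu_layers=[])
# would yield Sequential(conv1 -> relu_conv1 -> pool1).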
class bodypose_modelNew(nn.Module):
def __init__(self):
super(bodypose_modelNew, self).__init__()
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
            'Mconv7_stage6_L2']
blocks = {}
block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [
64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64,
128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), (
'pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), (
'conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1
]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0
]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3,
1, 1]), ('conv4_3_CPM', [512, 256, 3, 1, 1]), ('conv4_4_CPM', [
256, 128, 3, 1, 1])])
block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), (
'conv5_2_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L1', [128,
128, 3, 1, 1]), ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), (
'conv5_5_CPM_L1', [512, 38, 1, 1, 0])])
block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), (
'conv5_2_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L2', [128,
128, 3, 1, 1]), ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), (
'conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = make_layers(block0, no_relu_layers)
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict([('Mconv1_stage%d_L1' % i,
[185, 128, 7, 1, 3]), ('Mconv2_stage%d_L1' % i, [128, 128,
7, 1, 3]), ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])])
blocks['block%d_2' % i] = OrderedDict([('Mconv1_stage%d_L2' % i,
[185, 128, 7, 1, 3]), ('Mconv2_stage%d_L2' % i, [128, 128,
7, 1, 3]), ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
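    # Stage-1 branches consume the 128-channel conv4_4_CPM feature map;
    # stages 2-6 each consume the 185-channel concatenation of the previous
    # stage's two predictions with that feature map.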
def forward(self, input_0):
primals_1 = self.model0.conv1_1.weight
primals_2 = self.model0.conv1_1.bias
primals_4 = self.model0.conv1_2.weight
primals_5 = self.model0.conv1_2.bias
primals_6 = self.model0.conv2_1.weight
primals_7 = self.model0.conv2_1.bias
primals_8 = self.model0.conv2_2.weight
primals_9 = self.model0.conv2_2.bias
primals_10 = self.model0.conv3_1.weight
primals_11 = self.model0.conv3_1.bias
primals_12 = self.model0.conv3_2.weight
primals_13 = self.model0.conv3_2.bias
primals_14 = self.model0.conv3_3.weight
primals_15 = self.model0.conv3_3.bias
primals_16 = self.model0.conv3_4.weight
primals_17 = self.model0.conv3_4.bias
primals_18 = self.model0.conv4_1.weight
primals_19 = self.model0.conv4_1.bias
primals_20 = self.model0.conv4_2.weight
primals_21 = self.model0.conv4_2.bias
primals_22 = self.model0.conv4_3_CPM.weight
primals_23 = self.model0.conv4_3_CPM.bias
primals_24 = self.model0.conv4_4_CPM.weight
primals_25 = self.model0.conv4_4_CPM.bias
primals_26 = self.model1_1.conv5_1_CPM_L1.weight
primals_27 = self.model1_1.conv5_1_CPM_L1.bias
primals_28 = self.model1_1.conv5_2_CPM_L1.weight
primals_29 = self.model1_1.conv5_2_CPM_L1.bias
primals_30 = self.model1_1.conv5_3_CPM_L1.weight
primals_31 = self.model1_1.conv5_3_CPM_L1.bias
primals_32 = self.model1_1.conv5_4_CPM_L1.weight
primals_33 = self.model1_1.conv5_4_CPM_L1.bias
primals_34 = self.model1_1.conv5_5_CPM_L1.weight
primals_35 = self.model1_1.conv5_5_CPM_L1.bias
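        # The primals_* indices are positional inputs of `call`; the compiled
        # graph orders each stage's L1 (PAF) branch before its L2 (heatmap)
        # branch.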
        primals_46 = self.model2_1.Mconv1_stage2_L1.weight
        primals_47 = self.model2_1.Mconv1_stage2_L1.bias
        primals_48 = self.model2_1.Mconv2_stage2_L1.weight
        primals_49 = self.model2_1.Mconv2_stage2_L1.bias
        primals_50 = self.model2_1.Mconv3_stage2_L1.weight
        primals_51 = self.model2_1.Mconv3_stage2_L1.bias
        primals_52 = self.model2_1.Mconv4_stage2_L1.weight
        primals_53 = self.model2_1.Mconv4_stage2_L1.bias
        primals_54 = self.model2_1.Mconv5_stage2_L1.weight
        primals_55 = self.model2_1.Mconv5_stage2_L1.bias
        primals_56 = self.model2_1.Mconv6_stage2_L1.weight
        primals_57 = self.model2_1.Mconv6_stage2_L1.bias
        primals_58 = self.model2_1.Mconv7_stage2_L1.weight
        primals_59 = self.model2_1.Mconv7_stage2_L1.bias
        primals_74 = self.model3_1.Mconv1_stage3_L1.weight
        primals_75 = self.model3_1.Mconv1_stage3_L1.bias
        primals_76 = self.model3_1.Mconv2_stage3_L1.weight
        primals_77 = self.model3_1.Mconv2_stage3_L1.bias
        primals_78 = self.model3_1.Mconv3_stage3_L1.weight
        primals_79 = self.model3_1.Mconv3_stage3_L1.bias
        primals_80 = self.model3_1.Mconv4_stage3_L1.weight
        primals_81 = self.model3_1.Mconv4_stage3_L1.bias
        primals_82 = self.model3_1.Mconv5_stage3_L1.weight
        primals_83 = self.model3_1.Mconv5_stage3_L1.bias
        primals_84 = self.model3_1.Mconv6_stage3_L1.weight
        primals_85 = self.model3_1.Mconv6_stage3_L1.bias
        primals_86 = self.model3_1.Mconv7_stage3_L1.weight
        primals_87 = self.model3_1.Mconv7_stage3_L1.bias
        primals_102 = self.model4_1.Mconv1_stage4_L1.weight
        primals_103 = self.model4_1.Mconv1_stage4_L1.bias
        primals_104 = self.model4_1.Mconv2_stage4_L1.weight
        primals_105 = self.model4_1.Mconv2_stage4_L1.bias
        primals_106 = self.model4_1.Mconv3_stage4_L1.weight
        primals_107 = self.model4_1.Mconv3_stage4_L1.bias
        primals_108 = self.model4_1.Mconv4_stage4_L1.weight
        primals_109 = self.model4_1.Mconv4_stage4_L1.bias
        primals_110 = self.model4_1.Mconv5_stage4_L1.weight
        primals_111 = self.model4_1.Mconv5_stage4_L1.bias
        primals_112 = self.model4_1.Mconv6_stage4_L1.weight
        primals_113 = self.model4_1.Mconv6_stage4_L1.bias
        primals_114 = self.model4_1.Mconv7_stage4_L1.weight
        primals_115 = self.model4_1.Mconv7_stage4_L1.bias
        primals_130 = self.model5_1.Mconv1_stage5_L1.weight
        primals_131 = self.model5_1.Mconv1_stage5_L1.bias
        primals_132 = self.model5_1.Mconv2_stage5_L1.weight
        primals_133 = self.model5_1.Mconv2_stage5_L1.bias
        primals_134 = self.model5_1.Mconv3_stage5_L1.weight
        primals_135 = self.model5_1.Mconv3_stage5_L1.bias
        primals_136 = self.model5_1.Mconv4_stage5_L1.weight
        primals_137 = self.model5_1.Mconv4_stage5_L1.bias
        primals_138 = self.model5_1.Mconv5_stage5_L1.weight
        primals_139 = self.model5_1.Mconv5_stage5_L1.bias
        primals_140 = self.model5_1.Mconv6_stage5_L1.weight
        primals_141 = self.model5_1.Mconv6_stage5_L1.bias
        primals_142 = self.model5_1.Mconv7_stage5_L1.weight
        primals_143 = self.model5_1.Mconv7_stage5_L1.bias
        primals_158 = self.model6_1.Mconv1_stage6_L1.weight
        primals_159 = self.model6_1.Mconv1_stage6_L1.bias
        primals_160 = self.model6_1.Mconv2_stage6_L1.weight
        primals_161 = self.model6_1.Mconv2_stage6_L1.bias
        primals_162 = self.model6_1.Mconv3_stage6_L1.weight
        primals_163 = self.model6_1.Mconv3_stage6_L1.bias
        primals_164 = self.model6_1.Mconv4_stage6_L1.weight
        primals_165 = self.model6_1.Mconv4_stage6_L1.bias
        primals_166 = self.model6_1.Mconv5_stage6_L1.weight
        primals_167 = self.model6_1.Mconv5_stage6_L1.bias
        primals_168 = self.model6_1.Mconv6_stage6_L1.weight
        primals_169 = self.model6_1.Mconv6_stage6_L1.bias
        primals_170 = self.model6_1.Mconv7_stage6_L1.weight
        primals_171 = self.model6_1.Mconv7_stage6_L1.bias
        primals_36 = self.model1_2.conv5_1_CPM_L2.weight
        primals_37 = self.model1_2.conv5_1_CPM_L2.bias
        primals_38 = self.model1_2.conv5_2_CPM_L2.weight
        primals_39 = self.model1_2.conv5_2_CPM_L2.bias
        primals_40 = self.model1_2.conv5_3_CPM_L2.weight
        primals_41 = self.model1_2.conv5_3_CPM_L2.bias
        primals_42 = self.model1_2.conv5_4_CPM_L2.weight
        primals_43 = self.model1_2.conv5_4_CPM_L2.bias
        primals_44 = self.model1_2.conv5_5_CPM_L2.weight
        primals_45 = self.model1_2.conv5_5_CPM_L2.bias
        primals_60 = self.model2_2.Mconv1_stage2_L2.weight
        primals_61 = self.model2_2.Mconv1_stage2_L2.bias
        primals_62 = self.model2_2.Mconv2_stage2_L2.weight
        primals_63 = self.model2_2.Mconv2_stage2_L2.bias
        primals_64 = self.model2_2.Mconv3_stage2_L2.weight
        primals_65 = self.model2_2.Mconv3_stage2_L2.bias
        primals_66 = self.model2_2.Mconv4_stage2_L2.weight
        primals_67 = self.model2_2.Mconv4_stage2_L2.bias
        primals_68 = self.model2_2.Mconv5_stage2_L2.weight
        primals_69 = self.model2_2.Mconv5_stage2_L2.bias
        primals_70 = self.model2_2.Mconv6_stage2_L2.weight
        primals_71 = self.model2_2.Mconv6_stage2_L2.bias
        primals_72 = self.model2_2.Mconv7_stage2_L2.weight
        primals_73 = self.model2_2.Mconv7_stage2_L2.bias
        primals_88 = self.model3_2.Mconv1_stage3_L2.weight
        primals_89 = self.model3_2.Mconv1_stage3_L2.bias
        primals_90 = self.model3_2.Mconv2_stage3_L2.weight
        primals_91 = self.model3_2.Mconv2_stage3_L2.bias
        primals_92 = self.model3_2.Mconv3_stage3_L2.weight
        primals_93 = self.model3_2.Mconv3_stage3_L2.bias
        primals_94 = self.model3_2.Mconv4_stage3_L2.weight
        primals_95 = self.model3_2.Mconv4_stage3_L2.bias
        primals_96 = self.model3_2.Mconv5_stage3_L2.weight
        primals_97 = self.model3_2.Mconv5_stage3_L2.bias
        primals_98 = self.model3_2.Mconv6_stage3_L2.weight
        primals_99 = self.model3_2.Mconv6_stage3_L2.bias
        primals_100 = self.model3_2.Mconv7_stage3_L2.weight
        primals_101 = self.model3_2.Mconv7_stage3_L2.bias
        primals_116 = self.model4_2.Mconv1_stage4_L2.weight
        primals_117 = self.model4_2.Mconv1_stage4_L2.bias
        primals_118 = self.model4_2.Mconv2_stage4_L2.weight
        primals_119 = self.model4_2.Mconv2_stage4_L2.bias
        primals_120 = self.model4_2.Mconv3_stage4_L2.weight
        primals_121 = self.model4_2.Mconv3_stage4_L2.bias
        primals_122 = self.model4_2.Mconv4_stage4_L2.weight
        primals_123 = self.model4_2.Mconv4_stage4_L2.bias
        primals_124 = self.model4_2.Mconv5_stage4_L2.weight
        primals_125 = self.model4_2.Mconv5_stage4_L2.bias
        primals_126 = self.model4_2.Mconv6_stage4_L2.weight
        primals_127 = self.model4_2.Mconv6_stage4_L2.bias
        primals_128 = self.model4_2.Mconv7_stage4_L2.weight
        primals_129 = self.model4_2.Mconv7_stage4_L2.bias
        primals_144 = self.model5_2.Mconv1_stage5_L2.weight
        primals_145 = self.model5_2.Mconv1_stage5_L2.bias
        primals_146 = self.model5_2.Mconv2_stage5_L2.weight
        primals_147 = self.model5_2.Mconv2_stage5_L2.bias
        primals_148 = self.model5_2.Mconv3_stage5_L2.weight
        primals_149 = self.model5_2.Mconv3_stage5_L2.bias
        primals_150 = self.model5_2.Mconv4_stage5_L2.weight
        primals_151 = self.model5_2.Mconv4_stage5_L2.bias
        primals_152 = self.model5_2.Mconv5_stage5_L2.weight
        primals_153 = self.model5_2.Mconv5_stage5_L2.bias
        primals_154 = self.model5_2.Mconv6_stage5_L2.weight
        primals_155 = self.model5_2.Mconv6_stage5_L2.bias
        primals_156 = self.model5_2.Mconv7_stage5_L2.weight
        primals_157 = self.model5_2.Mconv7_stage5_L2.bias
primals_172 = self.model6_2.Mconv1_stage6_L2.weight
primals_173 = self.model6_2.Mconv1_stage6_L2.bias
primals_174 = self.model6_2.Mconv2_stage6_L2.weight
primals_175 = self.model6_2.Mconv2_stage6_L2.bias
primals_176 = self.model6_2.Mconv3_stage6_L2.weight
primals_177 = self.model6_2.Mconv3_stage6_L2.bias
primals_178 = self.model6_2.Mconv4_stage6_L2.weight
primals_179 = self.model6_2.Mconv4_stage6_L2.bias
primals_180 = self.model6_2.Mconv5_stage6_L2.weight
primals_181 = self.model6_2.Mconv5_stage6_L2.bias
primals_182 = self.model6_2.Mconv6_stage6_L2.weight
primals_183 = self.model6_2.Mconv6_stage6_L2.bias
primals_184 = self.model6_2.Mconv7_stage6_L2.weight
primals_185 = self.model6_2.Mconv7_stage6_L2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170, primals_171, primals_172, primals_173, primals_174,
primals_175, primals_176, primals_177, primals_178, primals_179,
primals_180, primals_181, primals_182, primals_183, primals_184,
primals_185])
return output[0], output[1]
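# A minimal smoke test, assuming a CUDA device is available; the shapes follow
# the assert_size_stride checks in `call` above (input (4, 3, 64, 64) ->
# PAF (4, 38, 8, 8), heatmap (4, 19, 8, 8)).
if __name__ == '__main__':
    model = bodypose_modelNew().cuda()
    paf, heatmap = model(torch.rand(4, 3, 64, 64, device='cuda'))
    print(paf.shape, heatmap.shape)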
| alanlee-chn/handpose-est | bodypose_model | false | 6,323 | [
"MIT"
] | 1 | 241a6beb45e045e65a328aade22ce536f4dcd893 | https://github.com/alanlee-chn/handpose-est/tree/241a6beb45e045e65a328aade22ce536f4dcd893 | import torch
from collections import OrderedDict
import torch.nn as nn
def make_layers(block, no_relu_layers):
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
kernel_size=v[2], stride=v[3], padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
class Model(nn.Module):
def __init__(self):
super().__init__()
no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2',
'Mconv7_stage2_L1', 'Mconv7_stage2_L2', 'Mconv7_stage3_L1',
'Mconv7_stage3_L2', 'Mconv7_stage4_L1', 'Mconv7_stage4_L2',
'Mconv7_stage5_L1', 'Mconv7_stage5_L2', 'Mconv7_stage6_L1',
            'Mconv7_stage6_L2']
blocks = {}
block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [
64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64,
128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]), (
'pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]), (
'conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1
]), ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0
]), ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3,
1, 1]), ('conv4_3_CPM', [512, 256, 3, 1, 1]), ('conv4_4_CPM', [
256, 128, 3, 1, 1])])
block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), (
'conv5_2_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L1', [128,
128, 3, 1, 1]), ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]), (
'conv5_5_CPM_L1', [512, 38, 1, 1, 0])])
block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), (
'conv5_2_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_3_CPM_L2', [128,
128, 3, 1, 1]), ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]), (
'conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = make_layers(block0, no_relu_layers)
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict([('Mconv1_stage%d_L1' % i,
[185, 128, 7, 1, 3]), ('Mconv2_stage%d_L1' % i, [128, 128,
7, 1, 3]), ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])])
blocks['block%d_2' % i] = OrderedDict([('Mconv1_stage%d_L2' % i,
[185, 128, 7, 1, 3]), ('Mconv2_stage%d_L2' % i, [128, 128,
7, 1, 3]), ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]), (
'Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]), (
'Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]), (
'Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.mod
# ... truncated (>4000 chars) for memory efficiency |
ShiftedSoftplus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ff/cfflcrfbkqug6wu6spd6cdudrcnssg32k3762qutew77yjljfd4b.py
# Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub]
# Source node to ATen node mapping:
# softplus => exp, gt, log1p, where
# sub => sub
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, 0.6931471824645996), kwargs = {})
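# Note: this is the numerically stable softplus decomposition: above the
# default threshold of 20 the activation falls back to the identity, since
# log1p(exp(x)) would overflow; otherwise log1p(exp(x)) is evaluated
# directly. The constant 0.6931471824645996 is float32 log(2), the shift
# subtracted by ShiftedSoftplus.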
triton_poi_fused_softplus_sub_0 = async_compile.triton('triton_poi_fused_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471824645996
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sub], Original ATen: [aten.softplus, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.utils.data
class ShiftedSoftplus(torch.nn.Module):
def __init__(self):
super(ShiftedSoftplus, self).__init__()
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, x):
return F.softplus(x) - self.shift
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
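# Minimal sanity check (illustrative, assuming the ShiftedSoftplus class
# defined above): softplus(0) = log(2), so the shift makes the activation
# vanish at the origin.
if __name__ == '__main__':
    out = ShiftedSoftplus()(torch.zeros(1))
    assert torch.allclose(out, torch.zeros(1), atol=1e-6)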
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_sub_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471824645996
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ShiftedSoftplusNew(torch.nn.Module):
def __init__(self):
super(ShiftedSoftplusNew, self).__init__()
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| beneisner/pytorch_geometric | ShiftedSoftplus | false | 6,324 | [
"MIT"
] | 1 | 53d44a96bd2de2753b1ab1d7153c026c92606a81 | https://github.com/beneisner/pytorch_geometric/tree/53d44a96bd2de2753b1ab1d7153c026c92606a81 | import torch
import torch.nn.functional as F
import torch.utils.data
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.shift = torch.log(torch.tensor(2.0)).item()
def forward(self, x):
return F.softplus(x) - self.shift
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ImageProcessingModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3i/c3iwnhlvdcclk47j3rkk3kmbovczaazr3m7idzxdnnbchkwedheh.py
# Topologically Sorted Source Nodes: [conv2d, observation], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# observation => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [7, 7], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
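# Note: besides the fused conv + ReLU output, the kernel materializes the
# boolean mask (relu <= 0) consumed by aten.threshold_backward; since this
# is a '0_forward' graph, the mask is returned so the backward pass can zero
# the corresponding gradients.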
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(7, 7), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, observation], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 1296, grid=grid(1296), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ImageProcessingModule(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=n_filters,
kernel_size=7, stride=7)
def forward(self, observation):
observation = F.relu(self.conv1(observation))
return observation
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'n_filters': 4}]
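# Shape note: for a 64x64 input with kernel_size=7, stride=7 and no padding,
# each output side is floor((64 - 7) / 7) + 1 = 9, which matches the
# (4, n_filters, 9, 9) buffer asserted in the compiled graph above.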
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(7,
7), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.bool)
get_raw_stream(0)
        triton_poi_fused_convolution_relu_threshold_backward_0[grid(1296)](
            buf1, primals_2, buf2, 1296, XBLOCK=128, num_warps=4,
            num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf2
class ImageProcessingModuleNew(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=n_filters,
kernel_size=7, stride=7)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| bentrevett/task-oriented-language-grounding | ImageProcessingModule | false | 6,325 | [
"MIT"
] | 1 | 812a7bc21ee622030eb0594c576c7d60dc630148 | https://github.com/bentrevett/task-oriented-language-grounding/tree/812a7bc21ee622030eb0594c576c7d60dc630148 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=n_filters,
kernel_size=7, stride=7)
def forward(self, observation):
observation = F.relu(self.conv1(observation))
return observation
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4]
|
BinaryChunk | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ah/cahaajyq2nxdzd3h74txgiwb2xiidnp6siut4ywyx6ajl7vk3hvp.py
# Topologically Sorted Source Nodes: [max_1, x], Original ATen: [aten.max, aten.stack]
# Source node to ATen node mapping:
# max_1 => max_1
# x => cat
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view, 1), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3], 1), kwargs = {})
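# Note: each of the four kernels below max-reduces one 16-element slice per
# sample. The base offsets 0, 8, 2 and 10 in the load expressions select the
# (chunk, half) pairs produced by chunk(2, -1) followed by chunk(2, -2) on
# the contiguous (4, 4, 4, 4) input with strides (64, 16, 4, 1).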
triton_per_fused_max_stack_0 = async_compile.triton('triton_per_fused_max_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_stack_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((4*((r1 // 2) % 2)) + (16*(r1 // 4)) + (64*x0) + (r1 % 2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + (4*x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/eo/ceobo6hls3ll4owxx63ahtpijopnucn5p4qpsqmmvxjlfejrcrsw.py
# Topologically Sorted Source Nodes: [max_2, x], Original ATen: [aten.max, aten.stack]
# Source node to ATen node mapping:
# max_2 => max_2
# x => cat
# Graph fragment:
# %max_2 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_1, 1), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3], 1), kwargs = {})
triton_per_fused_max_stack_1 = async_compile.triton('triton_per_fused_max_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_stack_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (8 + (4*((r1 // 2) % 2)) + (16*(r1 // 4)) + (64*x0) + (r1 % 2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + (4*x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/oy/coybmg34b4gm65jwh3ngleamgj2vns2qrk3dxswwgzq422hzqsie.py
# Topologically Sorted Source Nodes: [max_3, x], Original ATen: [aten.max, aten.stack]
# Source node to ATen node mapping:
# max_3 => max_3
# x => cat
# Graph fragment:
# %max_3 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_2, 1), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3], 1), kwargs = {})
triton_per_fused_max_stack_2 = async_compile.triton('triton_per_fused_max_stack_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_stack_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_stack_2(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (4*((r1 // 2) % 2)) + (16*(r1 // 4)) + (64*x0) + (r1 % 2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + (4*x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jp/cjpadu2qlpfurhiujxmuqot7ql2a5yl6tkkp4gbieotuo2ccibi4.py
# Topologically Sorted Source Nodes: [max_4, x], Original ATen: [aten.max, aten.stack]
# Source node to ATen node mapping:
# max_4 => max_4
# x => cat
# Graph fragment:
# %max_4 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%view_3, 1), kwargs = {})
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1, %unsqueeze_2, %unsqueeze_3], 1), kwargs = {})
triton_per_fused_max_stack_3 = async_compile.triton('triton_per_fused_max_stack_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_stack_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_stack_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (10 + (4*((r1 // 2) % 2)) + (16*(r1 // 4)) + (64*x0) + (r1 % 2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + (4*x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = reinterpret_tensor(buf12, (4, 1), (4, 1), 0) # alias
# Topologically Sorted Source Nodes: [max_1, x], Original ATen: [aten.max, aten.stack]
stream0 = get_raw_stream(0)
triton_per_fused_max_stack_0.run(arg0_1, buf8, 4, 16, grid=grid(4), stream=stream0)
buf9 = reinterpret_tensor(buf12, (4, 1), (4, 1), 1) # alias
# Topologically Sorted Source Nodes: [max_2, x], Original ATen: [aten.max, aten.stack]
triton_per_fused_max_stack_1.run(arg0_1, buf9, 4, 16, grid=grid(4), stream=stream0)
buf10 = reinterpret_tensor(buf12, (4, 1), (4, 1), 2) # alias
# Topologically Sorted Source Nodes: [max_3, x], Original ATen: [aten.max, aten.stack]
triton_per_fused_max_stack_2.run(arg0_1, buf10, 4, 16, grid=grid(4), stream=stream0)
buf11 = reinterpret_tensor(buf12, (4, 1), (4, 1), 3) # alias
# Topologically Sorted Source Nodes: [max_4, x], Original ATen: [aten.max, aten.stack]
triton_per_fused_max_stack_3.run(arg0_1, buf11, 4, 16, grid=grid(4), stream=stream0)
del arg0_1
return (buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
class BinaryChunk(torch.nn.Module):
def __init__(self, nCls, isLogit=False, pooling='max', chunk_dim=-1):
super(BinaryChunk, self).__init__()
self.nClass = nCls
self.nChunk = int(math.ceil(math.log2(self.nClass)))
self.pooling = pooling
self.isLogit = isLogit
def __repr__(self):
main_str = super(BinaryChunk, self).__repr__()
if self.isLogit:
main_str += '_logit'
main_str += (
f'_nChunk{self.nChunk}_cls[{self.nClass}]_pool[{self.pooling}]')
return main_str
def chunk_poll(self, ck, nSamp):
x2 = ck.contiguous().view(nSamp, -1)
if self.pooling == 'max':
x3 = torch.max(x2, 1)
return x3.values
else:
x3 = torch.mean(x2, 1)
return x3
def forward(self, x):
nSamp = x.shape[0]
x_max = []
for ck in x.chunk(self.nChunk, -1):
if self.isLogit:
x_max.append(self.chunk_poll(ck, nSamp))
else:
for xx in ck.chunk(2, -2):
x2 = xx.contiguous().view(nSamp, -1)
if self.pooling == 'max':
x3 = torch.max(x2, 1)
x_max.append(x3.values)
else:
x3 = torch.mean(x2, 1)
x_max.append(x3)
x = torch.stack(x_max, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'nCls': 4}]
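# Shape walk-through: nCls=4 gives nChunk = ceil(log2(4)) = 2; with
# isLogit=False each chunk is split once more along dim -2, yielding four
# pooled scalars per sample, so forward returns a (batch, 4) tensor, one
# value per max-reduction kernel in the compiled code.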
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_max_stack_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (r1 // 2 % 2) + 16 * (r1 // 4) + 64 * x0 +
r1 % 2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + 4 * x0, tmp4, xmask)
@triton.jit
def triton_per_fused_max_stack_1(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (8 + 4 * (r1 // 2 % 2) + 16 * (r1 // 4) + 64 *
x0 + r1 % 2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + 4 * x0, tmp4, xmask)
@triton.jit
def triton_per_fused_max_stack_2(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 4 * (r1 // 2 % 2) + 16 * (r1 // 4) + 64 *
x0 + r1 % 2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + 4 * x0, tmp4, xmask)
@triton.jit
def triton_per_fused_max_stack_3(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (10 + 4 * (r1 // 2 % 2) + 16 * (r1 // 4) + 64 *
x0 + r1 % 2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tl.store(out_ptr1 + 4 * x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf8 = reinterpret_tensor(buf12, (4, 1), (4, 1), 0)
get_raw_stream(0)
triton_per_fused_max_stack_0[grid(4)](arg0_1, buf8, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf9 = reinterpret_tensor(buf12, (4, 1), (4, 1), 1)
triton_per_fused_max_stack_1[grid(4)](arg0_1, buf9, 4, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf10 = reinterpret_tensor(buf12, (4, 1), (4, 1), 2)
        triton_per_fused_max_stack_2[grid(4)](arg0_1, buf10, 4, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
buf11 = reinterpret_tensor(buf12, (4, 1), (4, 1), 3)
        triton_per_fused_max_stack_3[grid(4)](arg0_1, buf11, 4, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf12,
class BinaryChunkNew(torch.nn.Module):
def __init__(self, nCls, isLogit=False, pooling='max', chunk_dim=-1):
super(BinaryChunkNew, self).__init__()
self.nClass = nCls
self.nChunk = int(math.ceil(math.log2(self.nClass)))
self.pooling = pooling
self.isLogit = isLogit
def __repr__(self):
main_str = super(BinaryChunkNew, self).__repr__()
if self.isLogit:
main_str += '_logit'
main_str += (
f'_nChunk{self.nChunk}_cls[{self.nClass}]_pool[{self.pooling}]')
return main_str
def chunk_poll(self, ck, nSamp):
x2 = ck.contiguous().view(nSamp, -1)
if self.pooling == 'max':
x3 = torch.max(x2, 1)
return x3.values
else:
x3 = torch.mean(x2, 1)
return x3
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| azopticsinc/optical-neural-network | BinaryChunk | false | 6,326 | [
"MIT"
] | 1 | 28280014a6c1fc717a5077ed5e3c3496a4b103ac | https://github.com/azopticsinc/optical-neural-network/tree/28280014a6c1fc717a5077ed5e3c3496a4b103ac | import math
import torch
class Model(torch.nn.Module):
def __init__(self, nCls, isLogit=False, pooling='max', chunk_dim=-1):
super().__init__()
self.nClass = nCls
self.nChunk = int(math.ceil(math.log2(self.nClass)))
self.pooling = pooling
self.isLogit = isLogit
def __repr__(self):
        main_str = super().__repr__()
if self.isLogit:
main_str += '_logit'
main_str += (
f'_nChunk{self.nChunk}_cls[{self.nClass}]_pool[{self.pooling}]')
return main_str
def chunk_poll(self, ck, nSamp):
x2 = ck.contiguous().view(nSamp, -1)
if self.pooling == 'max':
x3 = torch.max(x2, 1)
return x3.values
else:
x3 = torch.mean(x2, 1)
return x3
def forward(self, x):
nSamp = x.shape[0]
x_max = []
for ck in x.chunk(self.nChunk, -1):
if self.isLogit:
x_max.append(self.chunk_poll(ck, nSamp))
else:
for xx in ck.chunk(2, -2):
x2 = xx.contiguous().view(nSamp, -1)
if self.pooling == 'max':
x3 = torch.max(x2, 1)
x_max.append(x3.values)
else:
x3 = torch.mean(x2, 1)
x_max.append(x3)
x = torch.stack(x_max, 1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|